181 files changed, 10696 insertions, 7588 deletions
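The bulk of this series is a mechanical relocation: display-only state that previously sat directly in struct drm_i915_private (audio, cdclk, dpll, backlight, hotplug, fbc, vbt, connector properties, and the per-feature vtables) moves under a nested "display" sub-struct, so call sites change from dev_priv->audio.* to dev_priv->display.audio.* and from dev_priv->audio.funcs to dev_priv->display.funcs.audio. A minimal sketch of the shape of that change follows; the trimmed struct bodies are illustrative stand-ins, not the real definitions:

#include <linux/mutex.h>
#include <linux/types.h>

struct intel_audio_funcs;

/* Sketch only: the real structs carry many more members. */
struct intel_display {
	struct {
		struct mutex mutex;	/* was dev_priv->audio.mutex */
		int power_refcount;
		u32 freq_cntrl;
	} audio;
	struct {
		struct mutex lock;	/* was dev_priv->dpll.lock */
	} dpll;
	struct {
		unsigned int max_cdclk_freq;	/* was dev_priv->max_cdclk_freq */
	} cdclk;
	struct {
		/* assumption: quirk bits presumably also moved here */
		unsigned long mask;
	} quirks;
	struct {
		/* was dev_priv->audio.funcs */
		const struct intel_audio_funcs *audio;
	} funcs;
};

struct drm_i915_private {
	/* GT/GEM state keeps its existing location ... */
	struct intel_display display;	/* ... display state gathers here */
};

For these rename hunks only the access paths change, not the behavior; the same transformation covers vbt, opregion, backlight and the connector property pointers in the hunks below.

A smaller recurring change in intel_backlight.c replaces raw bitmask tests such as dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS with an intel_has_quirk() helper. The helper's body is not part of the hunks shown, so the stand-in below is an assumption that merely preserves the old mask-test semantics against the sketch structs above:

/* Assumed shape; the real helper lives in intel_quirks.c, not in this diff. */
static inline bool intel_has_quirk(struct drm_i915_private *i915,
				   unsigned long quirk)
{
	return i915->display.quirks.mask & quirk;
}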
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 522ef9b4aff3..a26edcdadc21 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -123,6 +123,7 @@ gt-y += \ gt/intel_ring.o \ gt/intel_ring_submission.o \ gt/intel_rps.o \ + gt/intel_sa_media.o \ gt/intel_sseu.o \ gt/intel_sseu_debugfs.o \ gt/intel_timeline.o \ @@ -257,7 +258,8 @@ i915-y += \ display/intel_vga.o \ display/i9xx_plane.o \ display/skl_scaler.o \ - display/skl_universal_plane.o + display/skl_universal_plane.o \ + display/skl_watermark.o i915-$(CONFIG_ACPI) += \ display/intel_acpi.o \ display/intel_opregion.o diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index 82ad8fe7440c..e3e3d27ffb53 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -1169,7 +1169,7 @@ intel_dp_hotplug(struct intel_encoder *encoder, static bool ibx_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin]; + u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin]; return intel_de_read(dev_priv, SDEISR) & bit; } @@ -1223,7 +1223,7 @@ static bool gm45_digital_port_connected(struct intel_encoder *encoder) static bool ilk_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin]; + u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin]; return intel_de_read(dev_priv, DEISR) & bit; } diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c index 861dcd2eb890..a5be4af792cb 100644 --- a/drivers/gpu/drm/i915/display/hsw_ips.c +++ b/drivers/gpu/drm/i915/display/hsw_ips.c @@ -202,7 +202,7 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) * Should measure whether using a lower cdclk w/o IPS */ if (IS_BROADWELL(i915) && - crtc_state->pixel_rate > i915->max_cdclk_freq * 95 / 100) + crtc_state->pixel_rate > i915->display.cdclk.max_cdclk_freq * 95 / 100) return false; return true; diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index 0f35f2facdfc..5afbe3e98ee8 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -125,7 +125,7 @@ static struct intel_fbc *i9xx_plane_fbc(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane) { if (i9xx_plane_has_fbc(dev_priv, i9xx_plane)) - return dev_priv->fbc[INTEL_FBC_A]; + return dev_priv->display.fbc[INTEL_FBC_A]; else return NULL; } diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 5dcfa7feffa9..ed4d93942dbd 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -33,6 +33,7 @@ #include "icl_dsi_regs.h" #include "intel_atomic.h" #include "intel_backlight.h" +#include "intel_backlight_regs.h" #include "intel_combo_phy.h" #include "intel_combo_phy_regs.h" #include "intel_connector.h" @@ -641,13 +642,13 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) u32 tmp; enum phy phy; - mutex_lock(&dev_priv->dpll.lock); + mutex_lock(&dev_priv->display.dpll.lock); tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp); - mutex_unlock(&dev_priv->dpll.lock); + 
mutex_unlock(&dev_priv->display.dpll.lock); } static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) @@ -657,13 +658,13 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) u32 tmp; enum phy phy; - mutex_lock(&dev_priv->dpll.lock); + mutex_lock(&dev_priv->display.dpll.lock); tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp); - mutex_unlock(&dev_priv->dpll.lock); + mutex_unlock(&dev_priv->display.dpll.lock); } static bool gen11_dsi_is_clock_enabled(struct intel_encoder *encoder) @@ -693,7 +694,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder, enum phy phy; u32 val; - mutex_lock(&dev_priv->dpll.lock); + mutex_lock(&dev_priv->display.dpll.lock); val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) { @@ -709,7 +710,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder, intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0); - mutex_unlock(&dev_priv->dpll.lock); + mutex_unlock(&dev_priv->display.dpll.lock); } static void @@ -1629,6 +1630,8 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, /* FIXME: initialize from VBT */ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; + vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay; + ret = intel_dsc_compute_params(crtc_state); if (ret) return ret; @@ -2070,8 +2073,11 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) else intel_dsi->ports = BIT(port); - intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports; - intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports; + if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports)) + intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports; + + if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports)) + intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports; for_each_dsi_port(port, intel_dsi->ports) { struct intel_dsi_host *host; diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index b94973b5633f..18f0a5ae3bac 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -62,9 +62,9 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector, struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(state); - if (property == dev_priv->force_audio_property) + if (property == dev_priv->display.properties.force_audio) *val = intel_conn_state->force_audio; - else if (property == dev_priv->broadcast_rgb_property) + else if (property == dev_priv->display.properties.broadcast_rgb) *val = intel_conn_state->broadcast_rgb; else { drm_dbg_atomic(&dev_priv->drm, @@ -95,12 +95,12 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector, struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(state); - if (property == dev_priv->force_audio_property) { + if (property == dev_priv->display.properties.force_audio) { intel_conn_state->force_audio = val; return 0; } - if (property == dev_priv->broadcast_rgb_property) { + if (property == dev_priv->display.properties.broadcast_rgb) { intel_conn_state->broadcast_rgb = val; return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c 
b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index dd876dbbaa39..aaa6708256d5 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -42,9 +42,9 @@ #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fb_pin.h" -#include "intel_pm.h" #include "intel_sprite.h" #include "skl_scaler.h" +#include "skl_watermark.h" static void intel_plane_state_reset(struct intel_plane_state *plane_state, struct intel_plane *plane) diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 6c9ee905f132..aacbc6da84ef 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -393,7 +393,7 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_audio_component *acomp = dev_priv->audio.component; + struct i915_audio_component *acomp = dev_priv->display.audio.component; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum port port = encoder->port; const struct dp_aud_n_m *nm; @@ -441,7 +441,7 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_audio_component *acomp = dev_priv->audio.component; + struct i915_audio_component *acomp = dev_priv->display.audio.component; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum port port = encoder->port; int n, rate; @@ -496,7 +496,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder, enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; u32 tmp; - mutex_lock(&dev_priv->audio.mutex); + mutex_lock(&dev_priv->display.audio.mutex); /* Disable timestamps */ tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder)); @@ -514,7 +514,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder, tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder); intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp); - mutex_unlock(&dev_priv->audio.mutex); + mutex_unlock(&dev_priv->display.audio.mutex); } static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder, @@ -532,7 +532,7 @@ static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder, h_total = crtc_state->hw.adjusted_mode.crtc_htotal; pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock; vdsc_bpp = crtc_state->dsc.compressed_bpp; - cdclk = i915->cdclk.hw.cdclk; + cdclk = i915->display.cdclk.hw.cdclk; /* fec= 0.972261, using rounding multiplier of 1000000 */ fec_coeff = 972261; link_clk = crtc_state->port_clock; @@ -639,7 +639,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder, u32 tmp; int len, i; - mutex_lock(&dev_priv->audio.mutex); + mutex_lock(&dev_priv->display.audio.mutex); /* Enable Audio WA for 4k DSC usecases */ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP)) @@ -677,7 +677,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder, /* Enable timestamps */ hsw_audio_config_update(encoder, crtc_state); - mutex_unlock(&dev_priv->audio.mutex); + mutex_unlock(&dev_priv->display.audio.mutex); } static void ilk_audio_codec_disable(struct intel_encoder *encoder, @@ -814,7 +814,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct 
i915_audio_component *acomp = dev_priv->audio.component; + struct i915_audio_component *acomp = dev_priv->display.audio.component; struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_connector *connector = conn_state->connector; const struct drm_display_mode *adjusted_mode = @@ -838,17 +838,17 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; - if (dev_priv->audio.funcs) - dev_priv->audio.funcs->audio_codec_enable(encoder, - crtc_state, - conn_state); + if (dev_priv->display.funcs.audio) + dev_priv->display.funcs.audio->audio_codec_enable(encoder, + crtc_state, + conn_state); - mutex_lock(&dev_priv->audio.mutex); + mutex_lock(&dev_priv->display.audio.mutex); encoder->audio_connector = connector; /* referred in audio callbacks */ - dev_priv->audio.encoder_map[pipe] = encoder; - mutex_unlock(&dev_priv->audio.mutex); + dev_priv->display.audio.encoder_map[pipe] = encoder; + mutex_unlock(&dev_priv->display.audio.mutex); if (acomp && acomp->base.audio_ops && acomp->base.audio_ops->pin_eld_notify) { @@ -878,7 +878,7 @@ void intel_audio_codec_disable(struct intel_encoder *encoder, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_audio_component *acomp = dev_priv->audio.component; + struct i915_audio_component *acomp = dev_priv->display.audio.component; struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_connector *connector = old_conn_state->connector; enum port port = encoder->port; @@ -891,15 +891,15 @@ void intel_audio_codec_disable(struct intel_encoder *encoder, connector->base.id, connector->name, encoder->base.base.id, encoder->base.name, pipe_name(pipe)); - if (dev_priv->audio.funcs) - dev_priv->audio.funcs->audio_codec_disable(encoder, - old_crtc_state, - old_conn_state); + if (dev_priv->display.funcs.audio) + dev_priv->display.funcs.audio->audio_codec_disable(encoder, + old_crtc_state, + old_conn_state); - mutex_lock(&dev_priv->audio.mutex); + mutex_lock(&dev_priv->display.audio.mutex); encoder->audio_connector = NULL; - dev_priv->audio.encoder_map[pipe] = NULL; - mutex_unlock(&dev_priv->audio.mutex); + dev_priv->display.audio.encoder_map[pipe] = NULL; + mutex_unlock(&dev_priv->display.audio.mutex); if (acomp && acomp->base.audio_ops && acomp->base.audio_ops->pin_eld_notify) { @@ -935,13 +935,13 @@ static const struct intel_audio_funcs hsw_audio_funcs = { void intel_audio_hooks_init(struct drm_i915_private *dev_priv) { if (IS_G4X(dev_priv)) { - dev_priv->audio.funcs = &g4x_audio_funcs; + dev_priv->display.funcs.audio = &g4x_audio_funcs; } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - dev_priv->audio.funcs = &ilk_audio_funcs; + dev_priv->display.funcs.audio = &ilk_audio_funcs; } else if (IS_HASWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 8) { - dev_priv->audio.funcs = &hsw_audio_funcs; + dev_priv->display.funcs.audio = &hsw_audio_funcs; } else if (HAS_PCH_SPLIT(dev_priv)) { - dev_priv->audio.funcs = &ilk_audio_funcs; + dev_priv->display.funcs.audio = &ilk_audio_funcs; } } @@ -971,7 +971,7 @@ void intel_audio_cdclk_change_post(struct drm_i915_private *i915) struct aud_ts_cdclk_m_n aud_ts; if (DISPLAY_VER(i915) >= 13) { - get_aud_ts_cdclk_m_n(i915->cdclk.hw.ref, i915->cdclk.hw.cdclk, &aud_ts); + get_aud_ts_cdclk_m_n(i915->display.cdclk.hw.ref, i915->display.cdclk.hw.cdclk, &aud_ts); intel_de_write(i915, AUD_TS_CDCLK_N, aud_ts.n); intel_de_write(i915, 
AUD_TS_CDCLK_M, aud_ts.m | AUD_TS_CDCLK_M_EN); @@ -1046,13 +1046,13 @@ static unsigned long i915_audio_component_get_power(struct device *kdev) ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO_PLAYBACK); - if (dev_priv->audio.power_refcount++ == 0) { + if (dev_priv->display.audio.power_refcount++ == 0) { if (DISPLAY_VER(dev_priv) >= 9) { intel_de_write(dev_priv, AUD_FREQ_CNTRL, - dev_priv->audio.freq_cntrl); + dev_priv->display.audio.freq_cntrl); drm_dbg_kms(&dev_priv->drm, "restored AUD_FREQ_CNTRL to 0x%x\n", - dev_priv->audio.freq_cntrl); + dev_priv->display.audio.freq_cntrl); } /* Force CDCLK to 2*BCLK as long as we need audio powered. */ @@ -1073,7 +1073,7 @@ static void i915_audio_component_put_power(struct device *kdev, struct drm_i915_private *dev_priv = kdev_to_i915(kdev); /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */ - if (--dev_priv->audio.power_refcount == 0) + if (--dev_priv->display.audio.power_refcount == 0) if (IS_GEMINILAKE(dev_priv)) glk_force_audio_cdclk(dev_priv, false); @@ -1119,7 +1119,7 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev) if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DDI(dev_priv))) return -ENODEV; - return dev_priv->cdclk.hw.cdclk; + return dev_priv->display.cdclk.hw.cdclk; } /* @@ -1140,10 +1140,10 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv, /* MST */ if (pipe >= 0) { if (drm_WARN_ON(&dev_priv->drm, - pipe >= ARRAY_SIZE(dev_priv->audio.encoder_map))) + pipe >= ARRAY_SIZE(dev_priv->display.audio.encoder_map))) return NULL; - encoder = dev_priv->audio.encoder_map[pipe]; + encoder = dev_priv->display.audio.encoder_map[pipe]; /* * when bootup, audio driver may not know it is * MST or not. So it will poll all the port & pipe @@ -1159,7 +1159,7 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv, return NULL; for_each_pipe(dev_priv, pipe) { - encoder = dev_priv->audio.encoder_map[pipe]; + encoder = dev_priv->display.audio.encoder_map[pipe]; if (encoder == NULL) continue; @@ -1177,7 +1177,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, int pipe, int rate) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); - struct i915_audio_component *acomp = dev_priv->audio.component; + struct i915_audio_component *acomp = dev_priv->display.audio.component; struct intel_encoder *encoder; struct intel_crtc *crtc; unsigned long cookie; @@ -1187,7 +1187,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, return 0; cookie = i915_audio_component_get_power(kdev); - mutex_lock(&dev_priv->audio.mutex); + mutex_lock(&dev_priv->display.audio.mutex); /* 1. 
get the pipe */ encoder = get_saved_enc(dev_priv, port, pipe); @@ -1206,7 +1206,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, hsw_audio_config_update(encoder, crtc->config); unlock: - mutex_unlock(&dev_priv->audio.mutex); + mutex_unlock(&dev_priv->display.audio.mutex); i915_audio_component_put_power(kdev, cookie); return err; } @@ -1220,13 +1220,13 @@ static int i915_audio_component_get_eld(struct device *kdev, int port, const u8 *eld; int ret = -EINVAL; - mutex_lock(&dev_priv->audio.mutex); + mutex_lock(&dev_priv->display.audio.mutex); intel_encoder = get_saved_enc(dev_priv, port, pipe); if (!intel_encoder) { drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n", port_name(port)); - mutex_unlock(&dev_priv->audio.mutex); + mutex_unlock(&dev_priv->display.audio.mutex); return ret; } @@ -1238,7 +1238,7 @@ static int i915_audio_component_get_eld(struct device *kdev, int port, memcpy(buf, eld, min(max_bytes, ret)); } - mutex_unlock(&dev_priv->audio.mutex); + mutex_unlock(&dev_priv->display.audio.mutex); return ret; } @@ -1273,7 +1273,7 @@ static int i915_audio_component_bind(struct device *i915_kdev, BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS); for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++) acomp->aud_sample_rate[i] = 0; - dev_priv->audio.component = acomp; + dev_priv->display.audio.component = acomp; drm_modeset_unlock_all(&dev_priv->drm); return 0; @@ -1288,14 +1288,14 @@ static void i915_audio_component_unbind(struct device *i915_kdev, drm_modeset_lock_all(&dev_priv->drm); acomp->base.ops = NULL; acomp->base.dev = NULL; - dev_priv->audio.component = NULL; + dev_priv->display.audio.component = NULL; drm_modeset_unlock_all(&dev_priv->drm); device_link_remove(hda_kdev, i915_kdev); - if (dev_priv->audio.power_refcount) + if (dev_priv->display.audio.power_refcount) drm_err(&dev_priv->drm, "audio power refcount %d after unbind\n", - dev_priv->audio.power_refcount); + dev_priv->display.audio.power_refcount); } static const struct component_ops i915_audio_component_bind_ops = { @@ -1359,13 +1359,13 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv) drm_dbg_kms(&dev_priv->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n", aud_freq, aud_freq_init); - dev_priv->audio.freq_cntrl = aud_freq; + dev_priv->display.audio.freq_cntrl = aud_freq; } /* init with current cdclk */ intel_audio_cdclk_change_post(dev_priv); - dev_priv->audio.component_registered = true; + dev_priv->display.audio.component_registered = true; } /** @@ -1377,11 +1377,11 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv) */ static void i915_audio_component_cleanup(struct drm_i915_private *dev_priv) { - if (!dev_priv->audio.component_registered) + if (!dev_priv->display.audio.component_registered) return; component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops); - dev_priv->audio.component_registered = false; + dev_priv->display.audio.component_registered = false; } /** @@ -1403,7 +1403,7 @@ void intel_audio_init(struct drm_i915_private *dev_priv) */ void intel_audio_deinit(struct drm_i915_private *dev_priv) { - if ((dev_priv)->audio.lpe.platdev != NULL) + if (dev_priv->display.audio.lpe.platdev != NULL) intel_lpe_audio_teardown(dev_priv); else i915_audio_component_cleanup(dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c index 110fc98ec280..11a1342d6d37 100644 --- a/drivers/gpu/drm/i915/display/intel_backlight.c +++ 
b/drivers/gpu/drm/i915/display/intel_backlight.c @@ -9,6 +9,7 @@ #include <linux/string_helpers.h> #include "intel_backlight.h" +#include "intel_backlight_regs.h" #include "intel_connector.h" #include "intel_de.h" #include "intel_display_types.h" @@ -16,6 +17,8 @@ #include "intel_dsi_dcs_backlight.h" #include "intel_panel.h" #include "intel_pci_config.h" +#include "intel_pps.h" +#include "intel_quirks.h" /** * scale - scale values from one range to another @@ -86,7 +89,7 @@ u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val) return val; if (dev_priv->params.invert_brightness > 0 || - dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { + intel_has_quirk(dev_priv, QUIRK_INVERT_BRIGHTNESS)) { return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min; } @@ -126,7 +129,7 @@ u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val) panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0); if (dev_priv->params.invert_brightness > 0 || - (dev_priv->params.invert_brightness == 0 && dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)) + (dev_priv->params.invert_brightness == 0 && intel_has_quirk(dev_priv, QUIRK_INVERT_BRIGHTNESS))) val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min); return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max, @@ -303,7 +306,7 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state, if (!panel->backlight.present || !conn_state->crtc) return; - mutex_lock(&dev_priv->backlight_lock); + mutex_lock(&dev_priv->display.backlight.lock); drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0); @@ -319,7 +322,7 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state, if (panel->backlight.enabled) intel_panel_actually_set_backlight(conn_state, hw_level); - mutex_unlock(&dev_priv->backlight_lock); + mutex_unlock(&dev_priv->display.backlight.lock); } static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level) @@ -463,14 +466,14 @@ void intel_backlight_disable(const struct drm_connector_state *old_conn_state) return; } - mutex_lock(&dev_priv->backlight_lock); + mutex_lock(&dev_priv->display.backlight.lock); if (panel->backlight.device) panel->backlight.device->props.power = FB_BLANK_POWERDOWN; panel->backlight.enabled = false; panel->backlight.funcs->disable(old_conn_state, 0); - mutex_unlock(&dev_priv->backlight_lock); + mutex_unlock(&dev_priv->display.backlight.lock); } static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state, @@ -813,11 +816,11 @@ void intel_backlight_enable(const struct intel_crtc_state *crtc_state, drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(pipe)); - mutex_lock(&dev_priv->backlight_lock); + mutex_lock(&dev_priv->display.backlight.lock); __intel_backlight_enable(crtc_state, conn_state); - mutex_unlock(&dev_priv->backlight_lock); + mutex_unlock(&dev_priv->display.backlight.lock); } #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) @@ -827,12 +830,12 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector) struct intel_panel *panel = &connector->panel; u32 val = 0; - mutex_lock(&dev_priv->backlight_lock); + mutex_lock(&dev_priv->display.backlight.lock); if (panel->backlight.enabled) val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector)); - mutex_unlock(&dev_priv->backlight_lock); + mutex_unlock(&dev_priv->display.backlight.lock); drm_dbg_kms(&dev_priv->drm, "get backlight PWM = %d\n", val); return val; @@ 
-860,7 +863,7 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta if (!panel->backlight.present) return; - mutex_lock(&dev_priv->backlight_lock); + mutex_lock(&dev_priv->display.backlight.lock); drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0); @@ -870,7 +873,7 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta if (panel->backlight.enabled) intel_panel_actually_set_backlight(conn_state, hw_level); - mutex_unlock(&dev_priv->backlight_lock); + mutex_unlock(&dev_priv->display.backlight.lock); } static int intel_backlight_device_update_status(struct backlight_device *bd) @@ -971,26 +974,24 @@ int intel_backlight_device_register(struct intel_connector *connector) if (!name) return -ENOMEM; - bd = backlight_device_register(name, connector->base.kdev, connector, - &intel_backlight_device_ops, &props); - - /* - * Using the same name independent of the drm device or connector - * prevents registration of multiple backlight devices in the - * driver. However, we need to use the default name for backward - * compatibility. Use unique names for subsequent backlight devices as a - * fallback when the default name already exists. - */ - if (IS_ERR(bd) && PTR_ERR(bd) == -EEXIST) { + bd = backlight_device_get_by_name(name); + if (bd) { + put_device(&bd->dev); + /* + * Using the same name independent of the drm device or connector + * prevents registration of multiple backlight devices in the + * driver. However, we need to use the default name for backward + * compatibility. Use unique names for subsequent backlight devices as a + * fallback when the default name already exists. + */ kfree(name); name = kasprintf(GFP_KERNEL, "card%d-%s-backlight", i915->drm.primary->index, connector->base.name); if (!name) return -ENOMEM; - - bd = backlight_device_register(name, connector->base.kdev, connector, - &intel_backlight_device_ops, &props); } + bd = backlight_device_register(name, connector->base.kdev, connector, + &intel_backlight_device_ops, &props); if (IS_ERR(bd)) { drm_err(&i915->drm, @@ -1113,7 +1114,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) if (IS_PINEVIEW(dev_priv)) clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq); else - clock = KHz(dev_priv->cdclk.hw.cdclk); + clock = KHz(dev_priv->display.cdclk.hw.cdclk); return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32); } @@ -1131,7 +1132,7 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) if (IS_G4X(dev_priv)) clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq); else - clock = KHz(dev_priv->cdclk.hw.cdclk); + clock = KHz(dev_priv->display.cdclk.hw.cdclk); return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128); } @@ -1591,11 +1592,11 @@ void intel_backlight_update(struct intel_atomic_state *state, if (!panel->backlight.present) return; - mutex_lock(&dev_priv->backlight_lock); + mutex_lock(&dev_priv->display.backlight.lock); if (!panel->backlight.enabled) __intel_backlight_enable(crtc_state, conn_state); - mutex_unlock(&dev_priv->backlight_lock); + mutex_unlock(&dev_priv->display.backlight.lock); } int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe) @@ -1605,7 +1606,7 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe) int ret; if (!connector->panel.vbt.backlight.present) { - if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) { + if (intel_has_quirk(dev_priv, QUIRK_BACKLIGHT_PRESENT)) { drm_dbg_kms(&dev_priv->drm, "no backlight present per VBT, but present per quirk\n"); } else { @@ 
-1620,9 +1621,9 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe) return -ENODEV; /* set level and max in panel struct */ - mutex_lock(&dev_priv->backlight_lock); + mutex_lock(&dev_priv->display.backlight.lock); ret = panel->backlight.funcs->setup(connector, pipe); - mutex_unlock(&dev_priv->backlight_lock); + mutex_unlock(&dev_priv->display.backlight.lock); if (ret) { drm_dbg_kms(&dev_priv->drm, @@ -1773,9 +1774,13 @@ void intel_backlight_init_funcs(struct intel_panel *panel) panel->backlight.pwm_funcs = &i9xx_pwm_funcs; } - if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP && - intel_dp_aux_init_backlight_funcs(connector) == 0) - return; + if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) { + if (intel_dp_aux_init_backlight_funcs(connector) == 0) + return; + + if (!intel_has_quirk(dev_priv, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK)) + connector->panel.backlight.power = intel_pps_backlight_power; + } /* We're using a standard PWM backlight interface */ panel->backlight.funcs = &pwm_bl_funcs; diff --git a/drivers/gpu/drm/i915/display/intel_backlight_regs.h b/drivers/gpu/drm/i915/display/intel_backlight_regs.h new file mode 100644 index 000000000000..50c1210f6d5d --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_backlight_regs.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_BACKLIGHT_REGS_H__ +#define __INTEL_BACKLIGHT_REGS_H__ + +#include "i915_reg_defs.h" + +#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250) +#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350) +#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \ + _VLV_BLC_PWM_CTL2_B) + +#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254) +#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354) +#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \ + _VLV_BLC_PWM_CTL_B) + +#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260) +#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360) +#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \ + _VLV_BLC_HIST_CTL_B) + +/* Backlight control */ +#define BLC_PWM_CTL2 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */ +#define BLM_PWM_ENABLE (1 << 31) +#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */ +#define BLM_PIPE_SELECT (1 << 29) +#define BLM_PIPE_SELECT_IVB (3 << 29) +#define BLM_PIPE_A (0 << 29) +#define BLM_PIPE_B (1 << 29) +#define BLM_PIPE_C (2 << 29) /* ivb + */ +#define BLM_TRANSCODER_A BLM_PIPE_A /* hsw */ +#define BLM_TRANSCODER_B BLM_PIPE_B +#define BLM_TRANSCODER_C BLM_PIPE_C +#define BLM_TRANSCODER_EDP (3 << 29) +#define BLM_PIPE(pipe) ((pipe) << 29) +#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */ +#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26) +#define BLM_PHASE_IN_ENABLE (1 << 25) +#define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24) +#define BLM_PHASE_IN_TIME_BASE_SHIFT (16) +#define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16) +#define BLM_PHASE_IN_COUNT_SHIFT (8) +#define BLM_PHASE_IN_COUNT_MASK (0xff << 8) +#define BLM_PHASE_IN_INCR_SHIFT (0) +#define BLM_PHASE_IN_INCR_MASK (0xff << 0) +#define BLC_PWM_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254) +/* + * This is the most significant 15 bits of the number of backlight cycles in a + * complete cycle of the modulated backlight control. + * + * The actual value is this field multiplied by two. 
+ */ +#define BACKLIGHT_MODULATION_FREQ_SHIFT (17) +#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) +#define BLM_LEGACY_MODE (1 << 16) /* gen2 only */ +/* + * This is the number of cycles out of the backlight modulation cycle for which + * the backlight is on. + * + * This field must be no greater than the number of cycles in the complete + * backlight modulation cycle. + */ +#define BACKLIGHT_DUTY_CYCLE_SHIFT (0) +#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) +#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe) +#define BLM_POLARITY_PNV (1 << 0) /* pnv only */ + +#define BLC_HIST_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260) +#define BLM_HISTOGRAM_ENABLE (1 << 31) + +/* New registers for PCH-split platforms. Safe where new bits show up, the + * register layout machtes with gen4 BLC_PWM_CTL[12]. */ +#define BLC_PWM_CPU_CTL2 _MMIO(0x48250) +#define BLC_PWM_CPU_CTL _MMIO(0x48254) + +#define HSW_BLC_PWM2_CTL _MMIO(0x48350) + +/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is + * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */ +#define BLC_PWM_PCH_CTL1 _MMIO(0xc8250) +#define BLM_PCH_PWM_ENABLE (1 << 31) +#define BLM_PCH_OVERRIDE_ENABLE (1 << 30) +#define BLM_PCH_POLARITY (1 << 29) +#define BLC_PWM_PCH_CTL2 _MMIO(0xc8254) + +/* BXT backlight register definition. */ +#define _BXT_BLC_PWM_CTL1 0xC8250 +#define BXT_BLC_PWM_ENABLE (1 << 31) +#define BXT_BLC_PWM_POLARITY (1 << 29) +#define _BXT_BLC_PWM_FREQ1 0xC8254 +#define _BXT_BLC_PWM_DUTY1 0xC8258 + +#define _BXT_BLC_PWM_CTL2 0xC8350 +#define _BXT_BLC_PWM_FREQ2 0xC8354 +#define _BXT_BLC_PWM_DUTY2 0xC8358 + +#define BXT_BLC_PWM_CTL(controller) _MMIO_PIPE(controller, \ + _BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2) +#define BXT_BLC_PWM_FREQ(controller) _MMIO_PIPE(controller, \ + _BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2) +#define BXT_BLC_PWM_DUTY(controller) _MMIO_PIPE(controller, \ + _BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2) + +/* Utility pin */ +#define UTIL_PIN_CTL _MMIO(0x48400) +#define UTIL_PIN_ENABLE (1 << 31) +#define UTIL_PIN_PIPE_MASK (3 << 29) +#define UTIL_PIN_PIPE(x) ((x) << 29) +#define UTIL_PIN_MODE_MASK (0xf << 24) +#define UTIL_PIN_MODE_DATA (0 << 24) +#define UTIL_PIN_MODE_PWM (1 << 24) +#define UTIL_PIN_MODE_VBLANK (4 << 24) +#define UTIL_PIN_MODE_VSYNC (5 << 24) +#define UTIL_PIN_MODE_EYE_LEVEL (8 << 24) +#define UTIL_PIN_OUTPUT_DATA (1 << 23) +#define UTIL_PIN_POLARITY (1 << 22) +#define UTIL_PIN_DIRECTION_INPUT (1 << 19) +#define UTIL_PIN_INPUT_DATA (1 << 16) + +#endif /* __INTEL_BACKLIGHT_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 51dde5bfd956..28bdb936cd1f 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -135,18 +135,6 @@ static u32 raw_block_offset(const void *bdb, enum bdb_block_id section_id) return block - bdb; } -/* size of the block excluding the header */ -static u32 raw_block_size(const void *bdb, enum bdb_block_id section_id) -{ - const void *block; - - block = find_raw_section(bdb, section_id); - if (!block) - return 0; - - return get_blocksize(block); -} - struct bdb_block_entry { struct list_head node; enum bdb_block_id section_id; @@ -159,7 +147,7 @@ find_section(struct drm_i915_private *i915, { struct bdb_block_entry *entry; - list_for_each_entry(entry, &i915->vbt.bdb_blocks, node) { + list_for_each_entry(entry, &i915->display.vbt.bdb_blocks, node) { if (entry->section_id == section_id) return entry->data + 3; } @@ -231,9 +219,14 @@ 
static bool validate_lfp_data_ptrs(const void *bdb, { int fp_timing_size, dvo_timing_size, panel_pnp_id_size, panel_name_size; int data_block_size, lfp_data_size; + const void *data_block; int i; - data_block_size = raw_block_size(bdb, BDB_LVDS_LFP_DATA); + data_block = find_raw_section(bdb, BDB_LVDS_LFP_DATA); + if (!data_block) + return false; + + data_block_size = get_blocksize(data_block); if (data_block_size == 0) return false; @@ -261,21 +254,6 @@ static bool validate_lfp_data_ptrs(const void *bdb, if (16 * lfp_data_size > data_block_size) return false; - /* - * Except for vlv/chv machines all real VBTs seem to have 6 - * unaccounted bytes in the fp_timing table. And it doesn't - * appear to be a really intentional hole as the fp_timing - * 0xffff terminator is always within those 6 missing bytes. - */ - if (fp_timing_size + dvo_timing_size + panel_pnp_id_size != lfp_data_size && - fp_timing_size + 6 + dvo_timing_size + panel_pnp_id_size != lfp_data_size) - return false; - - if (ptrs->ptr[0].fp_timing.offset + fp_timing_size > ptrs->ptr[0].dvo_timing.offset || - ptrs->ptr[0].dvo_timing.offset + dvo_timing_size != ptrs->ptr[0].panel_pnp_id.offset || - ptrs->ptr[0].panel_pnp_id.offset + panel_pnp_id_size != lfp_data_size) - return false; - /* make sure the table entries have uniform size */ for (i = 1; i < 16; i++) { if (ptrs->ptr[i].fp_timing.table_size != fp_timing_size || @@ -289,6 +267,23 @@ static bool validate_lfp_data_ptrs(const void *bdb, return false; } + /* + * Except for vlv/chv machines all real VBTs seem to have 6 + * unaccounted bytes in the fp_timing table. And it doesn't + * appear to be a really intentional hole as the fp_timing + * 0xffff terminator is always within those 6 missing bytes. + */ + if (fp_timing_size + 6 + dvo_timing_size + panel_pnp_id_size == lfp_data_size) + fp_timing_size += 6; + + if (fp_timing_size + dvo_timing_size + panel_pnp_id_size != lfp_data_size) + return false; + + if (ptrs->ptr[0].fp_timing.offset + fp_timing_size != ptrs->ptr[0].dvo_timing.offset || + ptrs->ptr[0].dvo_timing.offset + dvo_timing_size != ptrs->ptr[0].panel_pnp_id.offset || + ptrs->ptr[0].panel_pnp_id.offset + panel_pnp_id_size != lfp_data_size) + return false; + /* make sure the tables fit inside the data block */ for (i = 0; i < 16; i++) { if (ptrs->ptr[i].fp_timing.offset + fp_timing_size > data_block_size || @@ -300,6 +295,15 @@ static bool validate_lfp_data_ptrs(const void *bdb, if (ptrs->panel_name.offset + 16 * panel_name_size > data_block_size) return false; + /* make sure fp_timing terminators are present at expected locations */ + for (i = 0; i < 16; i++) { + const u16 *t = data_block + ptrs->ptr[i].fp_timing.offset + + fp_timing_size - 2; + + if (*t != 0xffff) + return false; + } + return true; } @@ -333,18 +337,6 @@ static bool fixup_lfp_data_ptrs(const void *bdb, void *ptrs_block) return validate_lfp_data_ptrs(bdb, ptrs); } -static const void *find_fp_timing_terminator(const u8 *data, int size) -{ - int i; - - for (i = 0; i < size - 1; i++) { - if (data[i] == 0xff && data[i+1] == 0xff) - return &data[i]; - } - - return NULL; -} - static int make_lfp_data_ptr(struct lvds_lfp_data_ptr_table *table, int table_size, int total_size) { @@ -368,11 +360,22 @@ static void next_lfp_data_ptr(struct lvds_lfp_data_ptr_table *next, static void *generate_lfp_data_ptrs(struct drm_i915_private *i915, const void *bdb) { - int i, size, table_size, block_size, offset; - const void *t0, *t1, *block; + int i, size, table_size, block_size, offset, fp_timing_size; struct 
bdb_lvds_lfp_data_ptrs *ptrs; + const void *block; void *ptrs_block; + /* + * The hardcoded fp_timing_size is only valid for + * modernish VBTs. All older VBTs definitely should + * include block 41 and thus we don't need to + * generate one. + */ + if (i915->display.vbt.version < 155) + return NULL; + + fp_timing_size = 38; + block = find_raw_section(bdb, BDB_LVDS_LFP_DATA); if (!block) return NULL; @@ -381,17 +384,8 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915, block_size = get_blocksize(block); - size = block_size; - t0 = find_fp_timing_terminator(block, size); - if (!t0) - return NULL; - - size -= t0 - block - 2; - t1 = find_fp_timing_terminator(t0 + 2, size); - if (!t1) - return NULL; - - size = t1 - t0; + size = fp_timing_size + sizeof(struct lvds_dvo_timing) + + sizeof(struct lvds_pnp_id); if (size * 16 > block_size) return NULL; @@ -409,7 +403,7 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915, table_size = sizeof(struct lvds_dvo_timing); size = make_lfp_data_ptr(&ptrs->ptr[0].dvo_timing, table_size, size); - table_size = t0 - block + 2; + table_size = fp_timing_size; size = make_lfp_data_ptr(&ptrs->ptr[0].fp_timing, table_size, size); if (ptrs->ptr[0].fp_timing.table_size) @@ -424,14 +418,14 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915, return NULL; } - size = t1 - t0; + size = fp_timing_size + sizeof(struct lvds_dvo_timing) + + sizeof(struct lvds_pnp_id); for (i = 1; i < 16; i++) { next_lfp_data_ptr(&ptrs->ptr[i].fp_timing, &ptrs->ptr[i-1].fp_timing, size); next_lfp_data_ptr(&ptrs->ptr[i].dvo_timing, &ptrs->ptr[i-1].dvo_timing, size); next_lfp_data_ptr(&ptrs->ptr[i].panel_pnp_id, &ptrs->ptr[i-1].panel_pnp_id, size); } - size = t1 - t0; table_size = sizeof(struct lvds_lfp_panel_name); if (16 * (size + table_size) <= block_size) { @@ -479,6 +473,13 @@ init_bdb_block(struct drm_i915_private *i915, block_size = get_blocksize(block); + /* + * Version number and new block size are considered + * part of the header for MIPI sequenece block v3+. 
+ */ + if (section_id == BDB_MIPI_SEQUENCE && *(const u8 *)block >= 3) + block_size += 5; + entry = kzalloc(struct_size(entry, data, max(min_size, block_size) + 3), GFP_KERNEL); if (!entry) { @@ -501,7 +502,7 @@ init_bdb_block(struct drm_i915_private *i915, return; } - list_add_tail(&entry->node, &i915->vbt.bdb_blocks); + list_add_tail(&entry->node, &i915->display.vbt.bdb_blocks); } static void init_bdb_blocks(struct drm_i915_private *i915, @@ -604,6 +605,19 @@ get_lfp_data_tail(const struct bdb_lvds_lfp_data *data, return NULL; } +static void dump_pnp_id(struct drm_i915_private *i915, + const struct lvds_pnp_id *pnp_id, + const char *name) +{ + u16 mfg_name = be16_to_cpu((__force __be16)pnp_id->mfg_name); + char vend[4]; + + drm_dbg_kms(&i915->drm, "%s PNPID mfg: %s (0x%x), prod: %u, serial: %u, week: %d, year: %d\n", + name, drm_edid_decode_mfg_id(mfg_name, vend), + pnp_id->mfg_name, pnp_id->product_code, pnp_id->serial, + pnp_id->mfg_week, pnp_id->mfg_year + 1990); +} + static int opregion_get_panel_type(struct drm_i915_private *i915, const struct intel_bios_encoder_data *devdata, const struct edid *edid) @@ -655,6 +669,8 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915, edid_id_nodate.mfg_week = 0; edid_id_nodate.mfg_year = 0; + dump_pnp_id(i915, edid_id, "EDID"); + ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS); if (!ptrs) return -1; @@ -861,6 +877,7 @@ parse_lfp_data(struct drm_i915_private *i915, const struct bdb_lvds_lfp_data *data; const struct bdb_lvds_lfp_data_tail *tail; const struct bdb_lvds_lfp_data_ptrs *ptrs; + const struct lvds_pnp_id *pnp_id; int panel_type = panel->vbt.panel_type; ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS); @@ -874,11 +891,18 @@ parse_lfp_data(struct drm_i915_private *i915, if (!panel->vbt.lfp_lvds_vbt_mode) parse_lfp_panel_dtd(i915, panel, data, ptrs); + pnp_id = get_lvds_pnp_id(data, ptrs, panel_type); + dump_pnp_id(i915, pnp_id, "Panel"); + tail = get_lfp_data_tail(data, ptrs); if (!tail) return; - if (i915->vbt.version >= 188) { + drm_dbg_kms(&i915->drm, "Panel name: %.*s\n", + (int)sizeof(tail->panel_name[0].name), + tail->panel_name[panel_type].name); + + if (i915->display.vbt.version >= 188) { panel->vbt.seamless_drrs_min_refresh_rate = tail->seamless_drrs_min_refresh_rate[panel_type]; drm_dbg_kms(&i915->drm, @@ -904,7 +928,7 @@ parse_generic_dtd(struct drm_i915_private *i915, * first on VBT >= 229, but still fall back to trying the old LFP * block if that fails. 
*/ - if (i915->vbt.version < 229) + if (i915->display.vbt.version < 229) return; generic_dtd = find_section(i915, BDB_GENERIC_DTD); @@ -1008,12 +1032,12 @@ parse_lfp_backlight(struct drm_i915_private *i915, } panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI; - if (i915->vbt.version >= 191) { + if (i915->display.vbt.version >= 191) { size_t exp_size; - if (i915->vbt.version >= 236) + if (i915->display.vbt.version >= 236) exp_size = sizeof(struct bdb_lfp_backlight_data); - else if (i915->vbt.version >= 234) + else if (i915->display.vbt.version >= 234) exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234; else exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191; @@ -1030,14 +1054,14 @@ parse_lfp_backlight(struct drm_i915_private *i915, panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; panel->vbt.backlight.active_low_pwm = entry->active_low_pwm; - if (i915->vbt.version >= 234) { + if (i915->display.vbt.version >= 234) { u16 min_level; bool scale; level = backlight_data->brightness_level[panel_type].level; min_level = backlight_data->brightness_min_level[panel_type].level; - if (i915->vbt.version >= 236) + if (i915->display.vbt.version >= 236) scale = backlight_data->brightness_precision_bits[panel_type] == 16; else scale = level > 255; @@ -1134,37 +1158,37 @@ parse_general_features(struct drm_i915_private *i915) if (!general) return; - i915->vbt.int_tv_support = general->int_tv_support; + i915->display.vbt.int_tv_support = general->int_tv_support; /* int_crt_support can't be trusted on earlier platforms */ - if (i915->vbt.version >= 155 && + if (i915->display.vbt.version >= 155 && (HAS_DDI(i915) || IS_VALLEYVIEW(i915))) - i915->vbt.int_crt_support = general->int_crt_support; - i915->vbt.lvds_use_ssc = general->enable_ssc; - i915->vbt.lvds_ssc_freq = + i915->display.vbt.int_crt_support = general->int_crt_support; + i915->display.vbt.lvds_use_ssc = general->enable_ssc; + i915->display.vbt.lvds_ssc_freq = intel_bios_ssc_frequency(i915, general->ssc_freq); - i915->vbt.display_clock_mode = general->display_clock_mode; - i915->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; - if (i915->vbt.version >= 181) { - i915->vbt.orientation = general->rotate_180 ? + i915->display.vbt.display_clock_mode = general->display_clock_mode; + i915->display.vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; + if (i915->display.vbt.version >= 181) { + i915->display.vbt.orientation = general->rotate_180 ? DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP : DRM_MODE_PANEL_ORIENTATION_NORMAL; } else { - i915->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN; + i915->display.vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN; } - if (i915->vbt.version >= 249 && general->afc_startup_config) { - i915->vbt.override_afc_startup = true; - i915->vbt.override_afc_startup_val = general->afc_startup_config == 0x1 ? 0x0 : 0x7; + if (i915->display.vbt.version >= 249 && general->afc_startup_config) { + i915->display.vbt.override_afc_startup = true; + i915->display.vbt.override_afc_startup_val = general->afc_startup_config == 0x1 ? 
0x0 : 0x7; } drm_dbg_kms(&i915->drm, "BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", - i915->vbt.int_tv_support, - i915->vbt.int_crt_support, - i915->vbt.lvds_use_ssc, - i915->vbt.lvds_ssc_freq, - i915->vbt.display_clock_mode, - i915->vbt.fdi_rx_polarity_inverted); + i915->display.vbt.int_tv_support, + i915->display.vbt.int_crt_support, + i915->display.vbt.lvds_use_ssc, + i915->display.vbt.lvds_ssc_freq, + i915->display.vbt.display_clock_mode, + i915->display.vbt.fdi_rx_polarity_inverted); } static const struct child_device_config * @@ -1190,7 +1214,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915) return; } - list_for_each_entry(devdata, &i915->vbt.display_devices, node) { + list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { child = &devdata->child; if (child->slave_addr != SLAVE_ADDR1 && @@ -1214,7 +1238,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915) child->slave_addr, (child->dvo_port == DEVICE_PORT_DVOB) ? "SDVOB" : "SDVOC"); - mapping = &i915->vbt.sdvo_mappings[child->dvo_port - 1]; + mapping = &i915->display.vbt.sdvo_mappings[child->dvo_port - 1]; if (!mapping->initialized) { mapping->dvo_port = child->dvo_port; mapping->slave_addr = child->slave_addr; @@ -1265,7 +1289,7 @@ parse_driver_features(struct drm_i915_private *i915) * interpretation, but real world VBTs seem to. */ if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS) - i915->vbt.int_lvds_support = 0; + i915->display.vbt.int_lvds_support = 0; } else { /* * FIXME it's not clear which BDB version has the LVDS config @@ -1278,10 +1302,10 @@ parse_driver_features(struct drm_i915_private *i915) * in the wild with the bits correctly populated. Version * 108 (on i85x) does not have the bits correctly populated. 
*/ - if (i915->vbt.version >= 134 && + if (i915->display.vbt.version >= 134 && driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS && driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS) - i915->vbt.int_lvds_support = 0; + i915->display.vbt.int_lvds_support = 0; } } @@ -1295,7 +1319,7 @@ parse_panel_driver_features(struct drm_i915_private *i915, if (!driver) return; - if (i915->vbt.version < 228) { + if (i915->display.vbt.version < 228) { drm_dbg_kms(&i915->drm, "DRRS State Enabled:%d\n", driver->drrs_enabled); /* @@ -1328,7 +1352,7 @@ parse_power_conservation_features(struct drm_i915_private *i915, panel->vbt.vrr = true; /* matches Windows behaviour */ - if (i915->vbt.version < 228) + if (i915->display.vbt.version < 228) return; power = find_section(i915, BDB_LFP_POWER); @@ -1354,10 +1378,10 @@ parse_power_conservation_features(struct drm_i915_private *i915, panel->vbt.drrs_type = DRRS_TYPE_NONE; } - if (i915->vbt.version >= 232) + if (i915->display.vbt.version >= 232) panel->vbt.edp.hobl = panel_bool(power->hobl, panel_type); - if (i915->vbt.version >= 233) + if (i915->display.vbt.version >= 233) panel->vbt.vrr = panel_bool(power->vrr_feature_enabled, panel_type); } @@ -1393,7 +1417,7 @@ parse_edp(struct drm_i915_private *i915, panel->vbt.edp.pps = *edp_pps; - if (i915->vbt.version >= 224) { + if (i915->display.vbt.version >= 224) { panel->vbt.edp.rate = edp->edp_fast_link_training_rate[panel_type] * 20; } else { @@ -1472,7 +1496,7 @@ parse_edp(struct drm_i915_private *i915, break; } - if (i915->vbt.version >= 173) { + if (i915->display.vbt.version >= 173) { u8 vswing; /* Don't read from VBT if module parameter has valid value*/ @@ -1488,7 +1512,7 @@ parse_edp(struct drm_i915_private *i915, panel->vbt.edp.drrs_msa_timing_delay = panel_bits(edp->sdrrs_msa_timing_delay, panel_type, 2); - if (i915->vbt.version >= 244) + if (i915->display.vbt.version >= 244) panel->vbt.edp.max_link_rate = edp->edp_max_port_link_rate[panel_type] * 20; } @@ -1520,7 +1544,7 @@ parse_psr(struct drm_i915_private *i915, * New psr options 0=500us, 1=100us, 2=2500us, 3=0us * Old decimal value is wake up time in multiples of 100 us. */ - if (i915->vbt.version >= 205 && + if (i915->display.vbt.version >= 205 && (DISPLAY_VER(i915) >= 9 && !IS_BROXTON(i915))) { switch (psr_table->tp1_wakeup_time) { case 0: @@ -1566,7 +1590,7 @@ parse_psr(struct drm_i915_private *i915, panel->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100; } - if (i915->vbt.version >= 226) { + if (i915->display.vbt.version >= 226) { u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time; wakeup_time = panel_bits(wakeup_time, panel_type, 2); @@ -1596,7 +1620,9 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915, struct intel_panel *panel, enum port port) { - if (!panel->vbt.dsi.config->dual_link || i915->vbt.version < 197) { + enum port port_bc = DISPLAY_VER(i915) >= 11 ? 
PORT_B : PORT_C; + + if (!panel->vbt.dsi.config->dual_link || i915->display.vbt.version < 197) { panel->vbt.dsi.bl_ports = BIT(port); if (panel->vbt.dsi.config->cabc_supported) panel->vbt.dsi.cabc_ports = BIT(port); @@ -1609,11 +1635,11 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915, panel->vbt.dsi.bl_ports = BIT(PORT_A); break; case DL_DCS_PORT_C: - panel->vbt.dsi.bl_ports = BIT(PORT_C); + panel->vbt.dsi.bl_ports = BIT(port_bc); break; default: case DL_DCS_PORT_A_AND_C: - panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C); + panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(port_bc); break; } @@ -1625,12 +1651,12 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915, panel->vbt.dsi.cabc_ports = BIT(PORT_A); break; case DL_DCS_PORT_C: - panel->vbt.dsi.cabc_ports = BIT(PORT_C); + panel->vbt.dsi.cabc_ports = BIT(port_bc); break; default: case DL_DCS_PORT_A_AND_C: panel->vbt.dsi.cabc_ports = - BIT(PORT_A) | BIT(PORT_C); + BIT(PORT_A) | BIT(port_bc); break; } } @@ -2051,7 +2077,7 @@ parse_compression_parameters(struct drm_i915_private *i915) u16 block_size; int index; - if (i915->vbt.version < 198) + if (i915->display.vbt.version < 198) return; params = find_section(i915, BDB_COMPRESSION_PARAMETERS); @@ -2071,7 +2097,7 @@ parse_compression_parameters(struct drm_i915_private *i915) } } - list_for_each_entry(devdata, &i915->vbt.display_devices, node) { + list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { child = &devdata->child; if (!child->compression_enable) @@ -2205,7 +2231,7 @@ static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin) return PORT_NONE; for_each_port(port) { - devdata = i915->vbt.ports[port]; + devdata = i915->display.vbt.ports[port]; if (devdata && ddc_pin == devdata->child.ddc_pin) return port; @@ -2254,7 +2280,7 @@ static void sanitize_ddc_pin(struct intel_bios_encoder_data *devdata, * there are real machines (eg. Asrock B250M-HDV) where VBT has both * port A and port E with the same AUX ch and we must pick port E :( */ - child = &i915->vbt.ports[p]->child; + child = &i915->display.vbt.ports[p]->child; child->device_type &= ~DEVICE_TYPE_TMDS_DVI_SIGNALING; child->device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT; @@ -2271,7 +2297,7 @@ static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch) return PORT_NONE; for_each_port(port) { - devdata = i915->vbt.ports[port]; + devdata = i915->display.vbt.ports[port]; if (devdata && aux_ch == devdata->child.aux_channel) return port; @@ -2306,7 +2332,7 @@ static void sanitize_aux_ch(struct intel_bios_encoder_data *devdata, * there are real machines (eg. 
Asrock B250M-HDV) where VBT has both * port A and port E with the same AUX ch and we must pick port E :( */ - child = &i915->vbt.ports[p]->child; + child = &i915->display.vbt.ports[p]->child; child->device_type &= ~DEVICE_TYPE_DISPLAYPORT_OUTPUT; child->aux_channel = 0; @@ -2418,7 +2444,7 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915, [PORT_TC4] = { DVO_PORT_HDMII, DVO_PORT_DPI, -1 }, }; - if (DISPLAY_VER(i915) == 13) + if (DISPLAY_VER(i915) >= 13) return __dvo_port_to_port(ARRAY_SIZE(xelpd_port_mapping), ARRAY_SIZE(xelpd_port_mapping[0]), xelpd_port_mapping, @@ -2480,15 +2506,23 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate) static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata) { - if (!devdata || devdata->i915->vbt.version < 216) + if (!devdata || devdata->i915->display.vbt.version < 216) return 0; - if (devdata->i915->vbt.version >= 230) + if (devdata->i915->display.vbt.version >= 230) return parse_bdb_230_dp_max_link_rate(devdata->child.dp_max_link_rate); else return parse_bdb_216_dp_max_link_rate(devdata->child.dp_max_link_rate); } +static int _intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata) +{ + if (!devdata || devdata->i915->display.vbt.version < 244) + return 0; + + return devdata->child.dp_max_lane_count + 1; +} + static void sanitize_device_type(struct intel_bios_encoder_data *devdata, enum port port) { @@ -2544,7 +2578,7 @@ intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata) static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata) { - if (!devdata || devdata->i915->vbt.version < 158) + if (!devdata || devdata->i915->display.vbt.version < 158) return -1; return devdata->child.hdmi_level_shifter_value; @@ -2552,7 +2586,7 @@ static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *de static int _intel_bios_max_tmds_clock(const struct intel_bios_encoder_data *devdata) { - if (!devdata || devdata->i915->vbt.version < 204) + if (!devdata || devdata->i915->display.vbt.version < 204) return 0; switch (devdata->child.hdmi_max_data_rate) { @@ -2661,7 +2695,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata) return; } - if (i915->vbt.ports[port]) { + if (i915->display.vbt.ports[port]) { drm_dbg_kms(&i915->drm, "More than one child device for port %c in VBT, using the first.\n", port_name(port)); @@ -2676,7 +2710,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata) if (intel_bios_encoder_supports_dp(devdata)) sanitize_aux_ch(devdata, port); - i915->vbt.ports[port] = devdata; + i915->display.vbt.ports[port] = devdata; } static bool has_ddi_port_info(struct drm_i915_private *i915) @@ -2692,12 +2726,12 @@ static void parse_ddi_ports(struct drm_i915_private *i915) if (!has_ddi_port_info(i915)) return; - list_for_each_entry(devdata, &i915->vbt.display_devices, node) + list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) parse_ddi_port(devdata); for_each_port(port) { - if (i915->vbt.ports[port]) - print_ddi_port(i915->vbt.ports[port], port); + if (i915->display.vbt.ports[port]) + print_ddi_port(i915->display.vbt.ports[port], port); } } @@ -2730,33 +2764,33 @@ parse_general_definitions(struct drm_i915_private *i915) bus_pin = defs->crt_ddc_gmbus_pin; drm_dbg_kms(&i915->drm, "crt_ddc_bus_pin: %d\n", bus_pin); if (intel_gmbus_is_valid_pin(i915, bus_pin)) - i915->vbt.crt_ddc_pin = bus_pin; + i915->display.vbt.crt_ddc_pin = bus_pin; - if (i915->vbt.version < 
106) { + if (i915->display.vbt.version < 106) { expected_size = 22; - } else if (i915->vbt.version < 111) { + } else if (i915->display.vbt.version < 111) { expected_size = 27; - } else if (i915->vbt.version < 195) { + } else if (i915->display.vbt.version < 195) { expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE; - } else if (i915->vbt.version == 195) { + } else if (i915->display.vbt.version == 195) { expected_size = 37; - } else if (i915->vbt.version <= 215) { + } else if (i915->display.vbt.version <= 215) { expected_size = 38; - } else if (i915->vbt.version <= 237) { + } else if (i915->display.vbt.version <= 237) { expected_size = 39; } else { expected_size = sizeof(*child); BUILD_BUG_ON(sizeof(*child) < 39); drm_dbg(&i915->drm, "Expected child device config size for VBT version %u not known; assuming %u\n", - i915->vbt.version, expected_size); + i915->display.vbt.version, expected_size); } /* Flag an error for unexpected size, but continue anyway. */ if (defs->child_dev_size != expected_size) drm_err(&i915->drm, "Unexpected child device config size %u (expected %u for VBT version %u)\n", - defs->child_dev_size, expected_size, i915->vbt.version); + defs->child_dev_size, expected_size, i915->display.vbt.version); /* The legacy sized child device config is the minimum we need. */ if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) { @@ -2792,10 +2826,10 @@ parse_general_definitions(struct drm_i915_private *i915) memcpy(&devdata->child, child, min_t(size_t, defs->child_dev_size, sizeof(*child))); - list_add_tail(&devdata->node, &i915->vbt.display_devices); + list_add_tail(&devdata->node, &i915->display.vbt.display_devices); } - if (list_empty(&i915->vbt.display_devices)) + if (list_empty(&i915->display.vbt.display_devices)) drm_dbg_kms(&i915->drm, "no child dev is parsed from VBT\n"); } @@ -2804,25 +2838,25 @@ parse_general_definitions(struct drm_i915_private *i915) static void init_vbt_defaults(struct drm_i915_private *i915) { - i915->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC; + i915->display.vbt.crt_ddc_pin = GMBUS_PIN_VGADDC; /* general features */ - i915->vbt.int_tv_support = 1; - i915->vbt.int_crt_support = 1; + i915->display.vbt.int_tv_support = 1; + i915->display.vbt.int_crt_support = 1; /* driver features */ - i915->vbt.int_lvds_support = 1; + i915->display.vbt.int_lvds_support = 1; /* Default to using SSC */ - i915->vbt.lvds_use_ssc = 1; + i915->display.vbt.lvds_use_ssc = 1; /* * Core/SandyBridge/IvyBridge use alternative (120MHz) reference * clock for LVDS. */ - i915->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(i915, - !HAS_PCH_SPLIT(i915)); + i915->display.vbt.lvds_ssc_freq = intel_bios_ssc_frequency(i915, + !HAS_PCH_SPLIT(i915)); drm_dbg_kms(&i915->drm, "Set default to SSC at %d kHz\n", - i915->vbt.lvds_ssc_freq); + i915->display.vbt.lvds_ssc_freq); } /* Common defaults which may be overridden by VBT. 
*/ @@ -2883,7 +2917,7 @@ init_vbt_missing_defaults(struct drm_i915_private *i915) if (port == PORT_A) child->device_type |= DEVICE_TYPE_INTERNAL_CONNECTOR; - list_add_tail(&devdata->node, &i915->vbt.display_devices); + list_add_tail(&devdata->node, &i915->display.vbt.display_devices); drm_dbg_kms(&i915->drm, "Generating default VBT child device with type 0x04%x on port %c\n", @@ -2891,7 +2925,7 @@ init_vbt_missing_defaults(struct drm_i915_private *i915) } /* Bypass some minimum baseline VBT version checks */ - i915->vbt.version = 155; + i915->display.vbt.version = 155; } static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt) @@ -3078,12 +3112,12 @@ err_unmap_oprom: */ void intel_bios_init(struct drm_i915_private *i915) { - const struct vbt_header *vbt = i915->opregion.vbt; + const struct vbt_header *vbt = i915->display.opregion.vbt; struct vbt_header *oprom_vbt = NULL; const struct bdb_header *bdb; - INIT_LIST_HEAD(&i915->vbt.display_devices); - INIT_LIST_HEAD(&i915->vbt.bdb_blocks); + INIT_LIST_HEAD(&i915->display.vbt.display_devices); + INIT_LIST_HEAD(&i915->display.vbt.bdb_blocks); if (!HAS_DISPLAY(i915)) { drm_dbg_kms(&i915->drm, @@ -3111,11 +3145,11 @@ void intel_bios_init(struct drm_i915_private *i915) goto out; bdb = get_bdb_header(vbt); - i915->vbt.version = bdb->version; + i915->display.vbt.version = bdb->version; drm_dbg_kms(&i915->drm, "VBT signature \"%.*s\", BDB version %d\n", - (int)sizeof(vbt->signature), vbt->signature, i915->vbt.version); + (int)sizeof(vbt->signature), vbt->signature, i915->display.vbt.version); init_bdb_blocks(i915, bdb); @@ -3172,13 +3206,13 @@ void intel_bios_driver_remove(struct drm_i915_private *i915) struct intel_bios_encoder_data *devdata, *nd; struct bdb_block_entry *entry, *ne; - list_for_each_entry_safe(devdata, nd, &i915->vbt.display_devices, node) { + list_for_each_entry_safe(devdata, nd, &i915->display.vbt.display_devices, node) { list_del(&devdata->node); kfree(devdata->dsc); kfree(devdata); } - list_for_each_entry_safe(entry, ne, &i915->vbt.bdb_blocks, node) { + list_for_each_entry_safe(entry, ne, &i915->display.vbt.bdb_blocks, node) { list_del(&entry->node); kfree(entry); } @@ -3212,13 +3246,13 @@ bool intel_bios_is_tv_present(struct drm_i915_private *i915) const struct intel_bios_encoder_data *devdata; const struct child_device_config *child; - if (!i915->vbt.int_tv_support) + if (!i915->display.vbt.int_tv_support) return false; - if (list_empty(&i915->vbt.display_devices)) + if (list_empty(&i915->display.vbt.display_devices)) return true; - list_for_each_entry(devdata, &i915->vbt.display_devices, node) { + list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { child = &devdata->child; /* @@ -3255,10 +3289,10 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin) const struct intel_bios_encoder_data *devdata; const struct child_device_config *child; - if (list_empty(&i915->vbt.display_devices)) + if (list_empty(&i915->display.vbt.display_devices)) return true; - list_for_each_entry(devdata, &i915->vbt.display_devices, node) { + list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { child = &devdata->child; /* If the device type is not LFP, continue. @@ -3285,7 +3319,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin) * additional data. Trust that if the VBT was written into * the OpRegion then they have validated the LVDS's existence. 
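
The expected-size ladder in parse_general_definitions() above maps a BDB version to the child device config size the parser should see. Pulled out as a standalone helper it reads as follows; this is an illustrative sketch, with LEGACY_CHILD_DEVICE_CONFIG_SIZE assumed to be 33 as in current intel_vbt_defs.h:

#include <stdio.h>

#define LEGACY_CHILD_DEVICE_CONFIG_SIZE 33 /* assumption; see intel_vbt_defs.h */

/* Expected child_dev_size for a given BDB version; 0 means "not known,
 * fall back to sizeof(struct child_device_config)". */
static unsigned int child_dev_expected_size(unsigned int version)
{
	if (version < 106)
		return 22;
	if (version < 111)
		return 27;
	if (version < 195)
		return LEGACY_CHILD_DEVICE_CONFIG_SIZE;
	if (version == 195)
		return 37;
	if (version <= 215)
		return 38;
	if (version <= 237)
		return 39;
	return 0;
}

int main(void)
{
	printf("%u %u %u\n",
	       child_dev_expected_size(110),   /* 27 */
	       child_dev_expected_size(216),   /* 39 */
	       child_dev_expected_size(250));  /* 0: assume full size */
	return 0;
}
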
*/ - if (i915->opregion.vbt) + if (i915->display.opregion.vbt) return true; } @@ -3304,7 +3338,7 @@ bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port) if (WARN_ON(!has_ddi_port_info(i915))) return true; - return i915->vbt.ports[port]; + return i915->display.vbt.ports[port]; } /** @@ -3364,7 +3398,7 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *i915, const struct child_device_config *child; u8 dvo_port; - list_for_each_entry(devdata, &i915->vbt.display_devices, node) { + list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { child = &devdata->child; if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT)) @@ -3463,7 +3497,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder, const struct intel_bios_encoder_data *devdata; const struct child_device_config *child; - list_for_each_entry(devdata, &i915->vbt.display_devices, node) { + list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { child = &devdata->child; if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT)) @@ -3494,7 +3528,7 @@ bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915, enum port port) { - const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port]; + const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port]; if (drm_WARN_ON_ONCE(&i915->drm, !IS_GEMINILAKE(i915) && !IS_BROXTON(i915))) @@ -3514,7 +3548,7 @@ bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915, enum port port) { - const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port]; + const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port]; return HAS_LSPCON(i915) && devdata && devdata->child.lspcon; } @@ -3530,7 +3564,7 @@ bool intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915, enum port port) { - const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port]; + const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port]; return devdata && devdata->child.lane_reversal; } @@ -3538,7 +3572,7 @@ intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915, enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, enum port port) { - const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port]; + const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port]; enum aux_ch aux_ch; if (!devdata || !devdata->child.aux_channel) { @@ -3576,7 +3610,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, aux_ch = AUX_CH_C; break; case DP_AUX_D: - if (DISPLAY_VER(i915) == 13) + if (DISPLAY_VER(i915) >= 13) aux_ch = AUX_CH_D_XELPD; else if (IS_ALDERLAKE_S(i915)) aux_ch = AUX_CH_USBC3; @@ -3586,7 +3620,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, aux_ch = AUX_CH_D; break; case DP_AUX_E: - if (DISPLAY_VER(i915) == 13) + if (DISPLAY_VER(i915) >= 13) aux_ch = AUX_CH_E_XELPD; else if (IS_ALDERLAKE_S(i915)) aux_ch = AUX_CH_USBC4; @@ -3594,25 +3628,25 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, aux_ch = AUX_CH_E; break; case DP_AUX_F: - if (DISPLAY_VER(i915) == 13) + if (DISPLAY_VER(i915) >= 13) aux_ch = AUX_CH_USBC1; else aux_ch = AUX_CH_F; break; case DP_AUX_G: - if (DISPLAY_VER(i915) == 13) + if (DISPLAY_VER(i915) >= 13) aux_ch = AUX_CH_USBC2; else aux_ch = AUX_CH_G; break; case DP_AUX_H: - if (DISPLAY_VER(i915) == 13) + if (DISPLAY_VER(i915) >= 13) aux_ch = AUX_CH_USBC3; else aux_ch = AUX_CH_H; break; case DP_AUX_I: - if (DISPLAY_VER(i915) == 13) + if 
(DISPLAY_VER(i915) >= 13) aux_ch = AUX_CH_USBC4; else aux_ch = AUX_CH_I; @@ -3632,7 +3666,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, int intel_bios_max_tmds_clock(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port]; + const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; return _intel_bios_max_tmds_clock(devdata); } @@ -3641,14 +3675,14 @@ int intel_bios_max_tmds_clock(struct intel_encoder *encoder) int intel_bios_hdmi_level_shift(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port]; + const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; return _intel_bios_hdmi_level_shift(devdata); } int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata) { - if (!devdata || devdata->i915->vbt.version < 196 || !devdata->child.iboost) + if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost) return 0; return translate_iboost(devdata->child.dp_iboost_level); @@ -3656,7 +3690,7 @@ int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devd int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata) { - if (!devdata || devdata->i915->vbt.version < 196 || !devdata->child.iboost) + if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost) return 0; return translate_iboost(devdata->child.hdmi_iboost_level); @@ -3665,15 +3699,23 @@ int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *de int intel_bios_dp_max_link_rate(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port]; + const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; return _intel_bios_dp_max_link_rate(devdata); } +int intel_bios_dp_max_lane_count(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; + + return _intel_bios_dp_max_lane_count(devdata); +} + int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port]; + const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; if (!devdata || !devdata->child.ddc_pin) return 0; @@ -3683,16 +3725,16 @@ int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder) bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata) { - return devdata->i915->vbt.version >= 195 && devdata->child.dp_usb_type_c; + return devdata->i915->display.vbt.version >= 195 && devdata->child.dp_usb_type_c; } bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata) { - return devdata->i915->vbt.version >= 209 && devdata->child.tbt; + return devdata->i915->display.vbt.version >= 209 && devdata->child.tbt; } const struct intel_bios_encoder_data * intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port) { - return i915->vbt.ports[port]; + return i915->display.vbt.ports[port]; } diff --git 
a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h index e47582b0de0a..e375405a7828 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.h +++ b/drivers/gpu/drm/i915/display/intel_bios.h @@ -258,6 +258,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder, int intel_bios_max_tmds_clock(struct intel_encoder *encoder); int intel_bios_hdmi_level_shift(struct intel_encoder *encoder); int intel_bios_dp_max_link_rate(struct intel_encoder *encoder); +int intel_bios_dp_max_lane_count(struct intel_encoder *encoder); int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder); bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port); bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port); diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 79269d2c476b..4ace026b29bd 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -5,15 +5,17 @@ #include <drm/drm_atomic_state_helper.h> +#include "i915_drv.h" #include "i915_reg.h" #include "i915_utils.h" #include "intel_atomic.h" #include "intel_bw.h" #include "intel_cdclk.h" +#include "intel_display_core.h" #include "intel_display_types.h" +#include "skl_watermark.h" #include "intel_mchbar_regs.h" #include "intel_pcode.h" -#include "intel_pm.h" /* Parameters for Qclk Geyserville (QGV) */ struct intel_qgv_point { @@ -137,6 +139,42 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv, return 0; } +static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv, + struct intel_qgv_point *sp, int point) +{ + u32 val, val2; + u16 dclk; + + val = intel_uncore_read(&dev_priv->uncore, + MTL_MEM_SS_INFO_QGV_POINT_LOW(point)); + val2 = intel_uncore_read(&dev_priv->uncore, + MTL_MEM_SS_INFO_QGV_POINT_HIGH(point)); + dclk = REG_FIELD_GET(MTL_DCLK_MASK, val); + sp->dclk = DIV_ROUND_UP((16667 * dclk), 1000); + sp->t_rp = REG_FIELD_GET(MTL_TRP_MASK, val); + sp->t_rcd = REG_FIELD_GET(MTL_TRCD_MASK, val); + + sp->t_rdpre = REG_FIELD_GET(MTL_TRDPRE_MASK, val2); + sp->t_ras = REG_FIELD_GET(MTL_TRAS_MASK, val2); + + sp->t_rc = sp->t_rp + sp->t_ras; + + return 0; +} + +static int +intel_read_qgv_point_info(struct drm_i915_private *dev_priv, + struct intel_qgv_point *sp, + int point) +{ + if (DISPLAY_VER(dev_priv) >= 14) + return mtl_read_qgv_point_info(dev_priv, sp, point); + else if (IS_DG1(dev_priv)) + return dg1_mchbar_read_qgv_point_info(dev_priv, sp, point); + else + return icl_pcode_read_qgv_point_info(dev_priv, sp, point); +} + static int icl_get_qgv_points(struct drm_i915_private *dev_priv, struct intel_qgv_info *qi, bool is_y_tile) @@ -147,7 +185,32 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv, qi->num_points = dram_info->num_qgv_points; qi->num_psf_points = dram_info->num_psf_gv_points; - if (DISPLAY_VER(dev_priv) >= 12) + if (DISPLAY_VER(dev_priv) >= 14) { + switch (dram_info->type) { + case INTEL_DRAM_DDR4: + qi->t_bl = 4; + qi->max_numchannels = 2; + qi->channel_width = 64; + qi->deinterleave = 2; + break; + case INTEL_DRAM_DDR5: + qi->t_bl = 8; + qi->max_numchannels = 4; + qi->channel_width = 32; + qi->deinterleave = 2; + break; + case INTEL_DRAM_LPDDR4: + case INTEL_DRAM_LPDDR5: + qi->t_bl = 16; + qi->max_numchannels = 8; + qi->channel_width = 16; + qi->deinterleave = 4; + break; + default: + MISSING_CASE(dram_info->type); + return -EINVAL; + } + } else if (DISPLAY_VER(dev_priv) >= 12) { switch (dram_info->type) { case 
INTEL_DRAM_DDR4: qi->t_bl = is_y_tile ? 8 : 4; @@ -181,7 +244,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv, qi->max_numchannels = 1; break; } - else if (DISPLAY_VER(dev_priv) == 11) { + } else if (DISPLAY_VER(dev_priv) == 11) { qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8; qi->max_numchannels = 1; } @@ -193,11 +256,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv, for (i = 0; i < qi->num_points; i++) { struct intel_qgv_point *sp = &qi->points[i]; - if (IS_DG1(dev_priv)) - ret = dg1_mchbar_read_qgv_point_info(dev_priv, sp, i); - else - ret = icl_pcode_read_qgv_point_info(dev_priv, sp, i); - + ret = intel_read_qgv_point_info(dev_priv, sp, i); if (ret) return ret; @@ -284,6 +343,13 @@ static const struct intel_sa_info adlp_sa_info = { .derating = 20, }; +static const struct intel_sa_info mtl_sa_info = { + .deburst = 32, + .deprogbwlimit = 38, /* GB/s */ + .displayrtids = 256, + .derating = 20, +}; + static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa) { struct intel_qgv_info qi = {}; @@ -292,7 +358,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel int ipqdepth, ipqdepthpch = 16; int dclk_max; int maxdebw; - int num_groups = ARRAY_SIZE(dev_priv->max_bw); + int num_groups = ARRAY_SIZE(dev_priv->display.bw.max); int i, ret; ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile); @@ -308,7 +374,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2); for (i = 0; i < num_groups; i++) { - struct intel_bw_info *bi = &dev_priv->max_bw[i]; + struct intel_bw_info *bi = &dev_priv->display.bw.max[i]; int clpchgroup; int j; @@ -346,9 +412,9 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel * as it will fail and pointless anyway. 
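
The DISPLAY_VER >= 14 branch added to icl_get_qgv_points() is pure data: one (t_bl, max_numchannels, channel_width, deinterleave) tuple per DRAM type, alongside mtl_read_qgv_point_info() decoding the QGV registers directly (the conversion sp->dclk = DIV_ROUND_UP(16667 * dclk, 1000) treats the raw DCLK field as counting 16.667 MHz steps). The same tuples as a lookup table, sketched with placeholder enum values rather than the real INTEL_DRAM_*:

#include <stdio.h>

enum dram_type { DRAM_DDR4, DRAM_DDR5, DRAM_LPDDR4, DRAM_LPDDR5 };

struct qgv_dram_params {
	int t_bl, max_numchannels, channel_width, deinterleave;
};

/* MTL (display 14) values from the switch above. */
static const struct qgv_dram_params mtl_params[] = {
	[DRAM_DDR4]   = { .t_bl =  4, .max_numchannels = 2, .channel_width = 64, .deinterleave = 2 },
	[DRAM_DDR5]   = { .t_bl =  8, .max_numchannels = 4, .channel_width = 32, .deinterleave = 2 },
	[DRAM_LPDDR4] = { .t_bl = 16, .max_numchannels = 8, .channel_width = 16, .deinterleave = 4 },
	[DRAM_LPDDR5] = { .t_bl = 16, .max_numchannels = 8, .channel_width = 16, .deinterleave = 4 },
};

int main(void)
{
	const struct qgv_dram_params *p = &mtl_params[DRAM_LPDDR5];

	printf("t_bl=%d channels=%d width=%d deinterleave=%d\n",
	       p->t_bl, p->max_numchannels, p->channel_width, p->deinterleave);
	return 0;
}
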
*/ if (qi.num_points == 1) - dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; + dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED; else - dev_priv->sagv_status = I915_SAGV_ENABLED; + dev_priv->display.sagv.status = I915_SAGV_ENABLED; return 0; } @@ -363,7 +429,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel int dclk_max; int maxdebw, peakbw; int clperchgroup; - int num_groups = ARRAY_SIZE(dev_priv->max_bw); + int num_groups = ARRAY_SIZE(dev_priv->display.bw.max); int i, ret; ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile); @@ -399,20 +465,22 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave; for (i = 0; i < num_groups; i++) { - struct intel_bw_info *bi = &dev_priv->max_bw[i]; + struct intel_bw_info *bi = &dev_priv->display.bw.max[i]; struct intel_bw_info *bi_next; int clpchgroup; int j; - if (i < num_groups - 1) - bi_next = &dev_priv->max_bw[i + 1]; - clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i; - if (i < num_groups - 1 && clpchgroup < clperchgroup) - bi_next->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; - else - bi_next->num_planes = 0; + if (i < num_groups - 1) { + bi_next = &dev_priv->display.bw.max[i + 1]; + + if (clpchgroup < clperchgroup) + bi_next->num_planes = (ipqdepth - clpchgroup) / + clpchgroup + 1; + else + bi_next->num_planes = 0; + } bi->num_qgv_points = qi.num_points; bi->num_psf_gv_points = qi.num_psf_points; @@ -456,9 +524,9 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel * as it will fail and pointless anyway. */ if (qi.num_points == 1) - dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; + dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED; else - dev_priv->sagv_status = I915_SAGV_ENABLED; + dev_priv->display.sagv.status = I915_SAGV_ENABLED; return 0; } @@ -466,7 +534,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel static void dg2_get_bw_info(struct drm_i915_private *i915) { unsigned int deratedbw = IS_DG2_G11(i915) ? 38000 : 50000; - int num_groups = ARRAY_SIZE(i915->max_bw); + int num_groups = ARRAY_SIZE(i915->display.bw.max); int i; /* @@ -477,7 +545,7 @@ static void dg2_get_bw_info(struct drm_i915_private *i915) * whereas DG2-G11 platforms have 38 GB/s. 
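
The reshuffled loop body in tgl_get_bw_info() is more than style: in the old code bi_next was only assigned when i < num_groups - 1, yet the trailing else still wrote bi_next->num_planes = 0 on the final iteration, through an uninitialized pointer. The new shape keeps every bi_next dereference inside the bounds check. A minimal standalone model of the fixed shape, with made-up group data:

#include <stdio.h>

struct bw_info { int num_planes; };

static void fill_groups(struct bw_info *groups, int n,
			int ipqdepth, int clperchgroup, int base_clpch)
{
	for (int i = 0; i < n; i++) {
		int clpchgroup = base_clpch << i;

		/* Fixed shape: only touch groups[i + 1] when it exists. */
		if (i < n - 1) {
			struct bw_info *next = &groups[i + 1];

			if (clpchgroup < clperchgroup)
				next->num_planes =
					(ipqdepth - clpchgroup) / clpchgroup + 1;
			else
				next->num_planes = 0;
		}
	}
}

int main(void)
{
	struct bw_info groups[3] = { { .num_planes = 1 } };

	fill_groups(groups, 3, 16, 8, 2);
	printf("%d %d %d\n", groups[0].num_planes,
	       groups[1].num_planes, groups[2].num_planes); /* 1 8 4 */
	return 0;
}
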
*/ for (i = 0; i < num_groups; i++) { - struct intel_bw_info *bi = &i915->max_bw[i]; + struct intel_bw_info *bi = &i915->display.bw.max[i]; bi->num_planes = 1; /* Need only one dummy QGV point per group */ @@ -485,7 +553,7 @@ static void dg2_get_bw_info(struct drm_i915_private *i915) bi->deratedbw[0] = deratedbw; } - i915->sagv_status = I915_SAGV_NOT_CONTROLLED; + i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; } static unsigned int icl_max_bw(struct drm_i915_private *dev_priv, @@ -498,9 +566,9 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv, */ num_planes = max(1, num_planes); - for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) { + for (i = 0; i < ARRAY_SIZE(dev_priv->display.bw.max); i++) { const struct intel_bw_info *bi = - &dev_priv->max_bw[i]; + &dev_priv->display.bw.max[i]; /* * Pcode will not expose all QGV points when @@ -526,9 +594,9 @@ static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv, */ num_planes = max(1, num_planes); - for (i = ARRAY_SIZE(dev_priv->max_bw) - 1; i >= 0; i--) { + for (i = ARRAY_SIZE(dev_priv->display.bw.max) - 1; i >= 0; i--) { const struct intel_bw_info *bi = - &dev_priv->max_bw[i]; + &dev_priv->display.bw.max[i]; /* * Pcode will not expose all QGV points when @@ -541,14 +609,14 @@ static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv, return bi->deratedbw[qgv_point]; } - return dev_priv->max_bw[0].deratedbw[qgv_point]; + return dev_priv->display.bw.max[0].deratedbw[qgv_point]; } static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv, int psf_gv_point) { const struct intel_bw_info *bi = - &dev_priv->max_bw[0]; + &dev_priv->display.bw.max[0]; return bi->psf_bw[psf_gv_point]; } @@ -558,7 +626,9 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; - if (IS_DG2(dev_priv)) + if (DISPLAY_VER(dev_priv) >= 14) + tgl_get_bw_info(dev_priv, &mtl_sa_info); + else if (IS_DG2(dev_priv)) dg2_get_bw_info(dev_priv); else if (IS_ALDERLAKE_P(dev_priv)) tgl_get_bw_info(dev_priv, &adlp_sa_info); @@ -667,7 +737,7 @@ intel_atomic_get_old_bw_state(struct intel_atomic_state *state) struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_global_state *bw_state; - bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->bw_obj); + bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->display.bw.obj); return to_intel_bw_state(bw_state); } @@ -678,7 +748,7 @@ intel_atomic_get_new_bw_state(struct intel_atomic_state *state) struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_global_state *bw_state; - bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->bw_obj); + bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->display.bw.obj); return to_intel_bw_state(bw_state); } @@ -689,7 +759,7 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state) struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_global_state *bw_state; - bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->bw_obj); + bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.bw.obj); if (IS_ERR(bw_state)) return ERR_CAST(bw_state); @@ -896,8 +966,8 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state, static u16 icl_qgv_points_mask(struct drm_i915_private *i915) { - unsigned int num_psf_gv_points = i915->max_bw[0].num_psf_gv_points; - unsigned int num_qgv_points = i915->max_bw[0].num_qgv_points; + unsigned int num_psf_gv_points = 
i915->display.bw.max[0].num_psf_gv_points; + unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points; u16 qgv_points = 0, psf_points = 0; /* @@ -970,8 +1040,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) int i, ret; u16 qgv_points = 0, psf_points = 0; unsigned int max_bw_point = 0, max_bw = 0; - unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points; - unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points; + unsigned int num_qgv_points = dev_priv->display.bw.max[0].num_qgv_points; + unsigned int num_psf_gv_points = dev_priv->display.bw.max[0].num_psf_gv_points; bool changed = false; /* FIXME earlier gens need some checks too */ @@ -1126,7 +1196,7 @@ int intel_bw_init(struct drm_i915_private *dev_priv) if (!state) return -ENOMEM; - intel_atomic_global_obj_init(dev_priv, &dev_priv->bw_obj, + intel_atomic_global_obj_init(dev_priv, &dev_priv->display.bw.obj, &state->base, &intel_bw_funcs); return 0; diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 6e80162632dd..ed05070b7307 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -79,26 +79,26 @@ struct intel_cdclk_funcs { void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { - dev_priv->cdclk_funcs->get_cdclk(dev_priv, cdclk_config); + dev_priv->display.funcs.cdclk->get_cdclk(dev_priv, cdclk_config); } static void intel_cdclk_set_cdclk(struct drm_i915_private *dev_priv, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { - dev_priv->cdclk_funcs->set_cdclk(dev_priv, cdclk_config, pipe); + dev_priv->display.funcs.cdclk->set_cdclk(dev_priv, cdclk_config, pipe); } static int intel_cdclk_modeset_calc_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_state *cdclk_config) { - return dev_priv->cdclk_funcs->modeset_calc_cdclk(cdclk_config); + return dev_priv->display.funcs.cdclk->modeset_calc_cdclk(cdclk_config); } static u8 intel_cdclk_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk) { - return dev_priv->cdclk_funcs->calc_voltage_level(cdclk); + return dev_priv->display.funcs.cdclk->calc_voltage_level(cdclk); } static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv, @@ -548,7 +548,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) else default_credits = PFI_CREDIT(8); - if (dev_priv->cdclk.hw.cdclk >= dev_priv->czclk_freq) { + if (dev_priv->display.cdclk.hw.cdclk >= dev_priv->czclk_freq) { /* CHV suggested value is 31 or 63 */ if (IS_CHERRYVIEW(dev_priv)) credits = PFI_CREDIT_63; @@ -1026,7 +1026,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5)) drm_err(&dev_priv->drm, "DPLL0 not locked\n"); - dev_priv->cdclk.hw.vco = vco; + dev_priv->display.cdclk.hw.vco = vco; /* We'll want to keep using the current vco from now on. 
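
The wrappers at the top of intel_cdclk.c now dispatch through the display.funcs.cdclk vtable; mandatory hooks are called directly, while optional ones are NULL-checked at the call site (intel_set_cdclk later warns and bails if set_cdclk is absent). A reduced sketch of that dispatch pattern, not the real intel_cdclk_funcs layout:

#include <stdio.h>

struct cdclk_funcs {
	int  (*get_cdclk)(void);      /* mandatory */
	void (*set_cdclk)(int cdclk); /* optional on fixed-frequency platforms */
};

static int fixed_get_cdclk(void) { return 133333; }

static const struct cdclk_funcs fixed_133mhz_funcs = {
	.get_cdclk = fixed_get_cdclk,
	/* .set_cdclk intentionally NULL: the frequency cannot change */
};

static void set_cdclk(const struct cdclk_funcs *funcs, int cdclk)
{
	if (!funcs->set_cdclk) {      /* mirrors the WARN-and-return check */
		fprintf(stderr, "set_cdclk not implemented\n");
		return;
	}
	funcs->set_cdclk(cdclk);
}

int main(void)
{
	printf("cdclk = %d kHz\n", fixed_133mhz_funcs.get_cdclk());
	set_cdclk(&fixed_133mhz_funcs, 450000); /* prints the warning */
	return 0;
}
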
*/ skl_set_preferred_cdclk_vco(dev_priv, vco); @@ -1040,7 +1040,7 @@ static void skl_dpll0_disable(struct drm_i915_private *dev_priv) if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1)) drm_err(&dev_priv->drm, "Couldn't disable DPLL0\n"); - dev_priv->cdclk.hw.vco = 0; + dev_priv->display.cdclk.hw.vco = 0; } static u32 skl_cdclk_freq_sel(struct drm_i915_private *dev_priv, @@ -1049,7 +1049,7 @@ static u32 skl_cdclk_freq_sel(struct drm_i915_private *dev_priv, switch (cdclk) { default: drm_WARN_ON(&dev_priv->drm, - cdclk != dev_priv->cdclk.hw.bypass); + cdclk != dev_priv->display.cdclk.hw.bypass); drm_WARN_ON(&dev_priv->drm, vco != 0); fallthrough; case 308571: @@ -1098,13 +1098,13 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, freq_select = skl_cdclk_freq_sel(dev_priv, cdclk, vco); - if (dev_priv->cdclk.hw.vco != 0 && - dev_priv->cdclk.hw.vco != vco) + if (dev_priv->display.cdclk.hw.vco != 0 && + dev_priv->display.cdclk.hw.vco != vco) skl_dpll0_disable(dev_priv); cdclk_ctl = intel_de_read(dev_priv, CDCLK_CTL); - if (dev_priv->cdclk.hw.vco != vco) { + if (dev_priv->display.cdclk.hw.vco != vco) { /* Wa Display #1183: skl,kbl,cfl */ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK); cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk); @@ -1116,7 +1116,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); intel_de_posting_read(dev_priv, CDCLK_CTL); - if (dev_priv->cdclk.hw.vco != vco) + if (dev_priv->display.cdclk.hw.vco != vco) skl_dpll0_enable(dev_priv, vco); /* Wa Display #1183: skl,kbl,cfl */ @@ -1151,11 +1151,11 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) goto sanitize; intel_update_cdclk(dev_priv); - intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK"); + intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK"); /* Is PLL enabled and locked ? */ - if (dev_priv->cdclk.hw.vco == 0 || - dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass) + if (dev_priv->display.cdclk.hw.vco == 0 || + dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass) goto sanitize; /* DPLL okay; verify the cdclock @@ -1166,7 +1166,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) */ cdctl = intel_de_read(dev_priv, CDCLK_CTL); expected = (cdctl & CDCLK_FREQ_SEL_MASK) | - skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk); + skl_cdclk_decimal(dev_priv->display.cdclk.hw.cdclk); if (cdctl == expected) /* All well; nothing to sanitize */ return; @@ -1175,9 +1175,9 @@ sanitize: drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n"); /* force cdclk programming */ - dev_priv->cdclk.hw.cdclk = 0; + dev_priv->display.cdclk.hw.cdclk = 0; /* force full PLL disable + enable */ - dev_priv->cdclk.hw.vco = -1; + dev_priv->display.cdclk.hw.vco = -1; } static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv) @@ -1186,19 +1186,19 @@ static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv) skl_sanitize_cdclk(dev_priv); - if (dev_priv->cdclk.hw.cdclk != 0 && - dev_priv->cdclk.hw.vco != 0) { + if (dev_priv->display.cdclk.hw.cdclk != 0 && + dev_priv->display.cdclk.hw.vco != 0) { /* * Use the current vco as our initial * guess as to what the preferred vco is. 
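
When skl_sanitize_cdclk() decides the pre-OS programming can't be trusted, it does not fix the hardware directly; it poisons the software copy (cdclk = 0, vco = -1) so the next intel_set_cdclk() sees a guaranteed mismatch and takes the full PLL disable + enable path. A toy model of why those sentinels work:

#include <stdbool.h>
#include <stdio.h>

struct cdclk_hw { int cdclk; int vco; };

static bool needs_reprogram(const struct cdclk_hw *hw,
			    const struct cdclk_hw *target)
{
	return hw->cdclk != target->cdclk || hw->vco != target->vco;
}

int main(void)
{
	struct cdclk_hw hw = { .cdclk = 675000, .vco = 8100000 };
	const struct cdclk_hw target = hw;

	/* sanitize: no real cdclk is 0, and vco = -1 matches neither
	 * "PLL off" (0) nor any legal VCO, so a full reprogram follows. */
	hw.cdclk = 0;
	hw.vco = -1;

	printf("reprogram needed: %s\n",
	       needs_reprogram(&hw, &target) ? "yes" : "no"); /* yes */
	return 0;
}
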
*/ if (dev_priv->skl_preferred_vco_freq == 0) skl_set_preferred_cdclk_vco(dev_priv, - dev_priv->cdclk.hw.vco); + dev_priv->display.cdclk.hw.vco); return; } - cdclk_config = dev_priv->cdclk.hw; + cdclk_config = dev_priv->display.cdclk.hw; cdclk_config.vco = dev_priv->skl_preferred_vco_freq; if (cdclk_config.vco == 0) @@ -1211,7 +1211,7 @@ static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv) static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv) { - struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw; + struct intel_cdclk_config cdclk_config = dev_priv->display.cdclk.hw; cdclk_config.cdclk = cdclk_config.bypass; cdclk_config.vco = 0; @@ -1352,35 +1352,35 @@ static const struct intel_cdclk_vals dg2_cdclk_table[] = { static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk) { - const struct intel_cdclk_vals *table = dev_priv->cdclk.table; + const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table; int i; for (i = 0; table[i].refclk; i++) - if (table[i].refclk == dev_priv->cdclk.hw.ref && + if (table[i].refclk == dev_priv->display.cdclk.hw.ref && table[i].cdclk >= min_cdclk) return table[i].cdclk; drm_WARN(&dev_priv->drm, 1, "Cannot satisfy minimum cdclk %d with refclk %u\n", - min_cdclk, dev_priv->cdclk.hw.ref); + min_cdclk, dev_priv->display.cdclk.hw.ref); return 0; } static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) { - const struct intel_cdclk_vals *table = dev_priv->cdclk.table; + const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table; int i; - if (cdclk == dev_priv->cdclk.hw.bypass) + if (cdclk == dev_priv->display.cdclk.hw.bypass) return 0; for (i = 0; table[i].refclk; i++) - if (table[i].refclk == dev_priv->cdclk.hw.ref && + if (table[i].refclk == dev_priv->display.cdclk.hw.ref && table[i].cdclk == cdclk) - return dev_priv->cdclk.hw.ref * table[i].ratio; + return dev_priv->display.cdclk.hw.ref * table[i].ratio; drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n", - cdclk, dev_priv->cdclk.hw.ref); + cdclk, dev_priv->display.cdclk.hw.ref); return 0; } @@ -1554,12 +1554,12 @@ static void bxt_de_pll_disable(struct drm_i915_private *dev_priv) BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) drm_err(&dev_priv->drm, "timeout waiting for DE PLL unlock\n"); - dev_priv->cdclk.hw.vco = 0; + dev_priv->display.cdclk.hw.vco = 0; } static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco) { - int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref); + int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref); intel_de_rmw(dev_priv, BXT_DE_PLL_CTL, BXT_DE_PLL_RATIO_MASK, BXT_DE_PLL_RATIO(ratio)); @@ -1571,7 +1571,7 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco) BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) drm_err(&dev_priv->drm, "timeout waiting for DE PLL lock\n"); - dev_priv->cdclk.hw.vco = vco; + dev_priv->display.cdclk.hw.vco = vco; } static void icl_cdclk_pll_disable(struct drm_i915_private *dev_priv) @@ -1583,12 +1583,12 @@ static void icl_cdclk_pll_disable(struct drm_i915_private *dev_priv) if (intel_de_wait_for_clear(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL unlock\n"); - dev_priv->cdclk.hw.vco = 0; + dev_priv->display.cdclk.hw.vco = 0; } static void icl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco) { - int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref); + int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref); u32 val; val = 
ICL_CDCLK_PLL_RATIO(ratio); @@ -1601,12 +1601,12 @@ static void icl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco) if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL lock\n"); - dev_priv->cdclk.hw.vco = vco; + dev_priv->display.cdclk.hw.vco = vco; } static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco) { - int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref); + int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref); u32 val; /* Write PLL ratio without disabling */ @@ -1625,7 +1625,7 @@ static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco) val &= ~BXT_DE_PLL_FREQ_REQ; intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); - dev_priv->cdclk.hw.vco = vco; + dev_priv->display.cdclk.hw.vco = vco; } static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) @@ -1655,7 +1655,7 @@ static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv, switch (DIV_ROUND_CLOSEST(vco, cdclk)) { default: drm_WARN_ON(&dev_priv->drm, - cdclk != dev_priv->cdclk.hw.bypass); + cdclk != dev_priv->display.cdclk.hw.bypass); drm_WARN_ON(&dev_priv->drm, vco != 0); fallthrough; case 2: @@ -1672,19 +1672,19 @@ static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv, static u32 cdclk_squash_waveform(struct drm_i915_private *dev_priv, int cdclk) { - const struct intel_cdclk_vals *table = dev_priv->cdclk.table; + const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table; int i; - if (cdclk == dev_priv->cdclk.hw.bypass) + if (cdclk == dev_priv->display.cdclk.hw.bypass) return 0; for (i = 0; table[i].refclk; i++) - if (table[i].refclk == dev_priv->cdclk.hw.ref && + if (table[i].refclk == dev_priv->display.cdclk.hw.ref && table[i].cdclk == cdclk) return table[i].waveform; drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n", - cdclk, dev_priv->cdclk.hw.ref); + cdclk, dev_priv->display.cdclk.hw.ref); return 0xffff; } @@ -1721,22 +1721,22 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, return; } - if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->cdclk.hw.vco > 0 && vco > 0) { - if (dev_priv->cdclk.hw.vco != vco) + if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0) { + if (dev_priv->display.cdclk.hw.vco != vco) adlp_cdclk_pll_crawl(dev_priv, vco); } else if (DISPLAY_VER(dev_priv) >= 11) { - if (dev_priv->cdclk.hw.vco != 0 && - dev_priv->cdclk.hw.vco != vco) + if (dev_priv->display.cdclk.hw.vco != 0 && + dev_priv->display.cdclk.hw.vco != vco) icl_cdclk_pll_disable(dev_priv); - if (dev_priv->cdclk.hw.vco != vco) + if (dev_priv->display.cdclk.hw.vco != vco) icl_cdclk_pll_enable(dev_priv, vco); } else { - if (dev_priv->cdclk.hw.vco != 0 && - dev_priv->cdclk.hw.vco != vco) + if (dev_priv->display.cdclk.hw.vco != 0 && + dev_priv->display.cdclk.hw.vco != vco) bxt_de_pll_disable(dev_priv); - if (dev_priv->cdclk.hw.vco != vco) + if (dev_priv->display.cdclk.hw.vco != vco) bxt_de_pll_enable(dev_priv, vco); } @@ -1803,7 +1803,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, * Can't read out the voltage level :( * Let's just assume everything is as expected. 
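
bxt_set_cdclk() above chooses between three ways of moving the PLL to the new VCO: an ADL-P style frequency crawl when the platform supports it and the PLL is running before and after, or a disable-then-enable sequence otherwise (the ICL-and-newer and BXT variants share the same shape). The decision, reduced to a standalone sketch where has_crawl stands in for HAS_CDCLK_CRAWL:

#include <stdio.h>

enum pll_action { PLL_NOOP, PLL_CRAWL, PLL_DISABLE_ENABLE, PLL_ENABLE };

static enum pll_action pick_pll_action(int old_vco, int new_vco, int has_crawl)
{
	if (old_vco == new_vco)
		return PLL_NOOP;

	/* Crawl only works PLL-on to PLL-on. */
	if (has_crawl && old_vco > 0 && new_vco > 0)
		return PLL_CRAWL;

	/* Otherwise: disable first if a different VCO is running. */
	return old_vco != 0 ? PLL_DISABLE_ENABLE : PLL_ENABLE;
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_pll_action(5940000, 6480000, 1),  /* 1: PLL_CRAWL */
	       pick_pll_action(5940000, 6480000, 0),  /* 2: PLL_DISABLE_ENABLE */
	       pick_pll_action(0, 6480000, 1));       /* 3: PLL_ENABLE */
	return 0;
}
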
*/ - dev_priv->cdclk.hw.voltage_level = cdclk_config->voltage_level; + dev_priv->display.cdclk.hw.voltage_level = cdclk_config->voltage_level; } static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) @@ -1812,10 +1812,10 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) int cdclk, clock, vco; intel_update_cdclk(dev_priv); - intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK"); + intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK"); - if (dev_priv->cdclk.hw.vco == 0 || - dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass) + if (dev_priv->display.cdclk.hw.vco == 0 || + dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass) goto sanitize; /* DPLL okay; verify the cdclock @@ -1833,32 +1833,32 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE); /* Make sure this is a legal cdclk value for the platform */ - cdclk = bxt_calc_cdclk(dev_priv, dev_priv->cdclk.hw.cdclk); - if (cdclk != dev_priv->cdclk.hw.cdclk) + cdclk = bxt_calc_cdclk(dev_priv, dev_priv->display.cdclk.hw.cdclk); + if (cdclk != dev_priv->display.cdclk.hw.cdclk) goto sanitize; /* Make sure the VCO is correct for the cdclk */ vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); - if (vco != dev_priv->cdclk.hw.vco) + if (vco != dev_priv->display.cdclk.hw.vco) goto sanitize; expected = skl_cdclk_decimal(cdclk); /* Figure out what CD2X divider we should be using for this cdclk */ if (has_cdclk_squasher(dev_priv)) - clock = dev_priv->cdclk.hw.vco / 2; + clock = dev_priv->display.cdclk.hw.vco / 2; else - clock = dev_priv->cdclk.hw.cdclk; + clock = dev_priv->display.cdclk.hw.cdclk; expected |= bxt_cdclk_cd2x_div_sel(dev_priv, clock, - dev_priv->cdclk.hw.vco); + dev_priv->display.cdclk.hw.vco); /* * Disable SSA Precharge when CD clock frequency < 500 MHz, * enable otherwise. */ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && - dev_priv->cdclk.hw.cdclk >= 500000) + dev_priv->display.cdclk.hw.cdclk >= 500000) expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; if (cdctl == expected) @@ -1869,10 +1869,10 @@ sanitize: drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n"); /* force cdclk programming */ - dev_priv->cdclk.hw.cdclk = 0; + dev_priv->display.cdclk.hw.cdclk = 0; /* force full PLL disable + enable */ - dev_priv->cdclk.hw.vco = -1; + dev_priv->display.cdclk.hw.vco = -1; } static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv) @@ -1881,11 +1881,11 @@ static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv) bxt_sanitize_cdclk(dev_priv); - if (dev_priv->cdclk.hw.cdclk != 0 && - dev_priv->cdclk.hw.vco != 0) + if (dev_priv->display.cdclk.hw.cdclk != 0 && + dev_priv->display.cdclk.hw.vco != 0) return; - cdclk_config = dev_priv->cdclk.hw; + cdclk_config = dev_priv->display.cdclk.hw; /* * FIXME: @@ -1902,7 +1902,7 @@ static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv) static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv) { - struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw; + struct intel_cdclk_config cdclk_config = dev_priv->display.cdclk.hw; cdclk_config.cdclk = cdclk_config.bypass; cdclk_config.vco = 0; @@ -1916,7 +1916,7 @@ static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv) * intel_cdclk_init_hw - Initialize CDCLK hardware * @i915: i915 device * - * Initialize CDCLK. This consists mainly of initializing dev_priv->cdclk.hw and + * Initialize CDCLK. 
This consists mainly of initializing dev_priv->display.cdclk.hw and * sanitizing the state of the hardware if needed. This is generally done only * during the display core initialization sequence, after which the DMC will * take care of turning CDCLK off/on as needed. @@ -2077,10 +2077,10 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, { struct intel_encoder *encoder; - if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config)) + if (!intel_cdclk_changed(&dev_priv->display.cdclk.hw, cdclk_config)) return; - if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->cdclk_funcs->set_cdclk)) + if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.funcs.cdclk->set_cdclk)) return; intel_cdclk_dump_config(dev_priv, cdclk_config, "Changing CDCLK to"); @@ -2098,12 +2098,12 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, * functions use cdclk. Not all platforms/ports do, * but we'll lock them all for simplicity. */ - mutex_lock(&dev_priv->gmbus_mutex); + mutex_lock(&dev_priv->display.gmbus.mutex); for_each_intel_dp(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); mutex_lock_nest_lock(&intel_dp->aux.hw_mutex, - &dev_priv->gmbus_mutex); + &dev_priv->display.gmbus.mutex); } intel_cdclk_set_cdclk(dev_priv, cdclk_config, pipe); @@ -2113,7 +2113,7 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, mutex_unlock(&intel_dp->aux.hw_mutex); } - mutex_unlock(&dev_priv->gmbus_mutex); + mutex_unlock(&dev_priv->display.gmbus.mutex); for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -2124,9 +2124,9 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, intel_audio_cdclk_change_post(dev_priv); if (drm_WARN(&dev_priv->drm, - intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config), + intel_cdclk_changed(&dev_priv->display.cdclk.hw, cdclk_config), "cdclk state doesn't match!\n")) { - intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "[hw state]"); + intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "[hw state]"); intel_cdclk_dump_config(dev_priv, cdclk_config, "[sw state]"); } } @@ -2300,7 +2300,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate); /* - * HACK. Currently for TGL platforms we calculate + * HACK. Currently for TGL/DG2 platforms we calculate * min_cdclk initially based on pixel_rate divided * by 2, accounting for also plane requirements, * however in some cases the lowest possible CDCLK @@ -2308,14 +2308,14 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) * Explicitly stating here that this seems to be currently * rather a Hack, than final solution. */ - if (IS_TIGERLAKE(dev_priv)) { + if (IS_TIGERLAKE(dev_priv) || IS_DG2(dev_priv)) { /* * Clamp to max_cdclk_freq in case pixel rate is higher, * in order not to break an 8K, but still leave W/A at place. 
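
The TGL (now TGL/DG2) workaround described in this comment ends in the max_t()/min_t() expression just below, i.e. min_cdclk = max(min_cdclk, min(pixel_rate, max_cdclk_freq)). A sketch with illustrative numbers, not taken from any particular mode:

#include <stdio.h>

static int clamp_min_cdclk(int min_cdclk, int pixel_rate, int max_cdclk_freq)
{
	int cap = pixel_rate < max_cdclk_freq ? pixel_rate : max_cdclk_freq;

	return min_cdclk > cap ? min_cdclk : cap;
}

int main(void)
{
	/* Moderate mode: raised from pixel_rate / 2 back up to pixel_rate. */
	printf("%d\n", clamp_min_cdclk(250000, 500000, 652800));  /* 500000 */
	/* 8K-class mode: capped at max cdclk instead of failing later. */
	printf("%d\n", clamp_min_cdclk(600000, 1200000, 652800)); /* 652800 */
	return 0;
}
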
*/ min_cdclk = max_t(int, min_cdclk, min_t(int, crtc_state->pixel_rate, - dev_priv->max_cdclk_freq)); + dev_priv->display.cdclk.max_cdclk_freq)); } return min_cdclk; @@ -2368,10 +2368,10 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state) for_each_pipe(dev_priv, pipe) min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk); - if (min_cdclk > dev_priv->max_cdclk_freq) { + if (min_cdclk > dev_priv->display.cdclk.max_cdclk_freq) { drm_dbg_kms(&dev_priv->drm, "required cdclk (%d kHz) exceeds max (%d kHz)\n", - min_cdclk, dev_priv->max_cdclk_freq); + min_cdclk, dev_priv->display.cdclk.max_cdclk_freq); return -EINVAL; } @@ -2643,7 +2643,7 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state) struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_global_state *cdclk_state; - cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->cdclk.obj); + cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.cdclk.obj); if (IS_ERR(cdclk_state)) return ERR_CAST(cdclk_state); @@ -2693,7 +2693,7 @@ int intel_cdclk_init(struct drm_i915_private *dev_priv) if (!cdclk_state) return -ENOMEM; - intel_atomic_global_obj_init(dev_priv, &dev_priv->cdclk.obj, + intel_atomic_global_obj_init(dev_priv, &dev_priv->display.cdclk.obj, &cdclk_state->base, &intel_cdclk_funcs); return 0; @@ -2799,7 +2799,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) { - int max_cdclk_freq = dev_priv->max_cdclk_freq; + int max_cdclk_freq = dev_priv->display.cdclk.max_cdclk_freq; if (DISPLAY_VER(dev_priv) >= 10) return 2 * max_cdclk_freq; @@ -2825,19 +2825,19 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) void intel_update_max_cdclk(struct drm_i915_private *dev_priv) { if (IS_JSL_EHL(dev_priv)) { - if (dev_priv->cdclk.hw.ref == 24000) - dev_priv->max_cdclk_freq = 552000; + if (dev_priv->display.cdclk.hw.ref == 24000) + dev_priv->display.cdclk.max_cdclk_freq = 552000; else - dev_priv->max_cdclk_freq = 556800; + dev_priv->display.cdclk.max_cdclk_freq = 556800; } else if (DISPLAY_VER(dev_priv) >= 11) { - if (dev_priv->cdclk.hw.ref == 24000) - dev_priv->max_cdclk_freq = 648000; + if (dev_priv->display.cdclk.hw.ref == 24000) + dev_priv->display.cdclk.max_cdclk_freq = 648000; else - dev_priv->max_cdclk_freq = 652800; + dev_priv->display.cdclk.max_cdclk_freq = 652800; } else if (IS_GEMINILAKE(dev_priv)) { - dev_priv->max_cdclk_freq = 316800; + dev_priv->display.cdclk.max_cdclk_freq = 316800; } else if (IS_BROXTON(dev_priv)) { - dev_priv->max_cdclk_freq = 624000; + dev_priv->display.cdclk.max_cdclk_freq = 624000; } else if (DISPLAY_VER(dev_priv) == 9) { u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; int max_cdclk, vco; @@ -2859,7 +2859,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv) else max_cdclk = 308571; - dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco); + dev_priv->display.cdclk.max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco); } else if (IS_BROADWELL(dev_priv)) { /* * FIXME with extra cooling we can allow @@ -2868,26 +2868,26 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv) * available? PCI ID, VTB, something else? 
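
intel_update_max_cdclk() above is another data-in-code ladder: for JSL/EHL and for display version 11+, the ceiling depends only on whether the reference clock is 24 MHz. The two refclk-dependent branches as a sketch:

#include <stdio.h>

/* Max CDCLK in kHz per the ladder above (other platforms use fixed values
 * or the SKL_DFSM fuse readout and are omitted here). */
static int max_cdclk_khz(int is_jsl_ehl, int ref_khz)
{
	if (is_jsl_ehl)
		return ref_khz == 24000 ? 552000 : 556800;
	return ref_khz == 24000 ? 648000 : 652800; /* display ver >= 11 */
}

int main(void)
{
	printf("%d %d\n",
	       max_cdclk_khz(1, 24000),  /* 552000 */
	       max_cdclk_khz(0, 38400)); /* 652800 */
	return 0;
}
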
*/ if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT) - dev_priv->max_cdclk_freq = 450000; + dev_priv->display.cdclk.max_cdclk_freq = 450000; else if (IS_BDW_ULX(dev_priv)) - dev_priv->max_cdclk_freq = 450000; + dev_priv->display.cdclk.max_cdclk_freq = 450000; else if (IS_BDW_ULT(dev_priv)) - dev_priv->max_cdclk_freq = 540000; + dev_priv->display.cdclk.max_cdclk_freq = 540000; else - dev_priv->max_cdclk_freq = 675000; + dev_priv->display.cdclk.max_cdclk_freq = 675000; } else if (IS_CHERRYVIEW(dev_priv)) { - dev_priv->max_cdclk_freq = 320000; + dev_priv->display.cdclk.max_cdclk_freq = 320000; } else if (IS_VALLEYVIEW(dev_priv)) { - dev_priv->max_cdclk_freq = 400000; + dev_priv->display.cdclk.max_cdclk_freq = 400000; } else { /* otherwise assume cdclk is fixed */ - dev_priv->max_cdclk_freq = dev_priv->cdclk.hw.cdclk; + dev_priv->display.cdclk.max_cdclk_freq = dev_priv->display.cdclk.hw.cdclk; } dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv); drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n", - dev_priv->max_cdclk_freq); + dev_priv->display.cdclk.max_cdclk_freq); drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n", dev_priv->max_dotclk_freq); @@ -2901,7 +2901,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv) */ void intel_update_cdclk(struct drm_i915_private *dev_priv) { - intel_cdclk_get_cdclk(dev_priv, &dev_priv->cdclk.hw); + intel_cdclk_get_cdclk(dev_priv, &dev_priv->display.cdclk.hw); /* * 9:0 CMBUS [sic] CDCLK frequency (cdfreq): @@ -2911,7 +2911,7 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv) */ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) intel_de_write(dev_priv, GMBUSFREQ_VLV, - DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000)); + DIV_ROUND_UP(dev_priv->display.cdclk.hw.cdclk, 1000)); } static int dg1_rawclk(struct drm_i915_private *dev_priv) @@ -3036,6 +3036,13 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv) if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) freq = dg1_rawclk(dev_priv); + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTP) + /* + * MTL always uses a 38.4 MHz rawclk. The bspec tells us + * "RAWCLK_FREQ defaults to the values for 38.4 and does + * not need to be programmed." 
+ */ + freq = 38400; else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) freq = cnp_rawclk(dev_priv); else if (HAS_PCH_SPLIT(dev_priv)) @@ -3187,78 +3194,78 @@ static const struct intel_cdclk_funcs i830_cdclk_funcs = { void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) { if (IS_DG2(dev_priv)) { - dev_priv->cdclk_funcs = &tgl_cdclk_funcs; - dev_priv->cdclk.table = dg2_cdclk_table; + dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; + dev_priv->display.cdclk.table = dg2_cdclk_table; } else if (IS_ALDERLAKE_P(dev_priv)) { - dev_priv->cdclk_funcs = &tgl_cdclk_funcs; + dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; /* Wa_22011320316:adl-p[a0] */ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) - dev_priv->cdclk.table = adlp_a_step_cdclk_table; + dev_priv->display.cdclk.table = adlp_a_step_cdclk_table; else - dev_priv->cdclk.table = adlp_cdclk_table; + dev_priv->display.cdclk.table = adlp_cdclk_table; } else if (IS_ROCKETLAKE(dev_priv)) { - dev_priv->cdclk_funcs = &tgl_cdclk_funcs; - dev_priv->cdclk.table = rkl_cdclk_table; + dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; + dev_priv->display.cdclk.table = rkl_cdclk_table; } else if (DISPLAY_VER(dev_priv) >= 12) { - dev_priv->cdclk_funcs = &tgl_cdclk_funcs; - dev_priv->cdclk.table = icl_cdclk_table; + dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; + dev_priv->display.cdclk.table = icl_cdclk_table; } else if (IS_JSL_EHL(dev_priv)) { - dev_priv->cdclk_funcs = &ehl_cdclk_funcs; - dev_priv->cdclk.table = icl_cdclk_table; + dev_priv->display.funcs.cdclk = &ehl_cdclk_funcs; + dev_priv->display.cdclk.table = icl_cdclk_table; } else if (DISPLAY_VER(dev_priv) >= 11) { - dev_priv->cdclk_funcs = &icl_cdclk_funcs; - dev_priv->cdclk.table = icl_cdclk_table; + dev_priv->display.funcs.cdclk = &icl_cdclk_funcs; + dev_priv->display.cdclk.table = icl_cdclk_table; } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { - dev_priv->cdclk_funcs = &bxt_cdclk_funcs; + dev_priv->display.funcs.cdclk = &bxt_cdclk_funcs; if (IS_GEMINILAKE(dev_priv)) - dev_priv->cdclk.table = glk_cdclk_table; + dev_priv->display.cdclk.table = glk_cdclk_table; else - dev_priv->cdclk.table = bxt_cdclk_table; + dev_priv->display.cdclk.table = bxt_cdclk_table; } else if (DISPLAY_VER(dev_priv) == 9) { - dev_priv->cdclk_funcs = &skl_cdclk_funcs; + dev_priv->display.funcs.cdclk = &skl_cdclk_funcs; } else if (IS_BROADWELL(dev_priv)) { - dev_priv->cdclk_funcs = &bdw_cdclk_funcs; + dev_priv->display.funcs.cdclk = &bdw_cdclk_funcs; } else if (IS_HASWELL(dev_priv)) { - dev_priv->cdclk_funcs = &hsw_cdclk_funcs; + dev_priv->display.funcs.cdclk = &hsw_cdclk_funcs; } else if (IS_CHERRYVIEW(dev_priv)) { - dev_priv->cdclk_funcs = &chv_cdclk_funcs; + dev_priv->display.funcs.cdclk = &chv_cdclk_funcs; } else if (IS_VALLEYVIEW(dev_priv)) { - dev_priv->cdclk_funcs = &vlv_cdclk_funcs; + dev_priv->display.funcs.cdclk = &vlv_cdclk_funcs; } else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) { - dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs; + dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs; } else if (IS_IRONLAKE(dev_priv)) { - dev_priv->cdclk_funcs = &ilk_cdclk_funcs; + dev_priv->display.funcs.cdclk = &ilk_cdclk_funcs; } else if (IS_GM45(dev_priv)) { - dev_priv->cdclk_funcs = &gm45_cdclk_funcs; + dev_priv->display.funcs.cdclk = &gm45_cdclk_funcs; } else if (IS_G45(dev_priv)) { - dev_priv->cdclk_funcs = &g33_cdclk_funcs; + dev_priv->display.funcs.cdclk = &g33_cdclk_funcs; } else if (IS_I965GM(dev_priv)) { - dev_priv->cdclk_funcs = &i965gm_cdclk_funcs; + 
dev_priv->display.funcs.cdclk = &i965gm_cdclk_funcs; } else if (IS_I965G(dev_priv)) { - dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs; + dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs; } else if (IS_PINEVIEW(dev_priv)) { - dev_priv->cdclk_funcs = &pnv_cdclk_funcs; + dev_priv->display.funcs.cdclk = &pnv_cdclk_funcs; } else if (IS_G33(dev_priv)) { - dev_priv->cdclk_funcs = &g33_cdclk_funcs; + dev_priv->display.funcs.cdclk = &g33_cdclk_funcs; } else if (IS_I945GM(dev_priv)) { - dev_priv->cdclk_funcs = &i945gm_cdclk_funcs; + dev_priv->display.funcs.cdclk = &i945gm_cdclk_funcs; } else if (IS_I945G(dev_priv)) { - dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs; + dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs; } else if (IS_I915GM(dev_priv)) { - dev_priv->cdclk_funcs = &i915gm_cdclk_funcs; + dev_priv->display.funcs.cdclk = &i915gm_cdclk_funcs; } else if (IS_I915G(dev_priv)) { - dev_priv->cdclk_funcs = &i915g_cdclk_funcs; + dev_priv->display.funcs.cdclk = &i915g_cdclk_funcs; } else if (IS_I865G(dev_priv)) { - dev_priv->cdclk_funcs = &i865g_cdclk_funcs; + dev_priv->display.funcs.cdclk = &i865g_cdclk_funcs; } else if (IS_I85X(dev_priv)) { - dev_priv->cdclk_funcs = &i85x_cdclk_funcs; + dev_priv->display.funcs.cdclk = &i85x_cdclk_funcs; } else if (IS_I845G(dev_priv)) { - dev_priv->cdclk_funcs = &i845g_cdclk_funcs; + dev_priv->display.funcs.cdclk = &i845g_cdclk_funcs; } else if (IS_I830(dev_priv)) { - dev_priv->cdclk_funcs = &i830_cdclk_funcs; + dev_priv->display.funcs.cdclk = &i830_cdclk_funcs; } - if (drm_WARN(&dev_priv->drm, !dev_priv->cdclk_funcs, + if (drm_WARN(&dev_priv->drm, !dev_priv->display.funcs.cdclk, "Unknown platform. Assuming i830\n")) - dev_priv->cdclk_funcs = &i830_cdclk_funcs; + dev_priv->display.funcs.cdclk = &i830_cdclk_funcs; } diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h index b535cf6a7d9e..c674879a84a5 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -77,9 +77,9 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state); #define to_intel_cdclk_state(x) container_of((x), struct intel_cdclk_state, base) #define intel_atomic_get_old_cdclk_state(state) \ - to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->cdclk.obj)) + to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj)) #define intel_atomic_get_new_cdclk_state(state) \ - to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->cdclk.obj)) + to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj)) int intel_cdclk_init(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 9583d17e858d..6bda4274eae9 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -26,6 +26,7 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_dpll.h" +#include "intel_dsb.h" #include "vlv_dsi_pll.h" struct intel_color_funcs { @@ -1167,22 +1168,22 @@ void intel_color_load_luts(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - dev_priv->color_funcs->load_luts(crtc_state); + dev_priv->display.funcs.color->load_luts(crtc_state); } void intel_color_commit_noarm(const struct intel_crtc_state 
*crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - if (dev_priv->color_funcs->color_commit_noarm) - dev_priv->color_funcs->color_commit_noarm(crtc_state); + if (dev_priv->display.funcs.color->color_commit_noarm) + dev_priv->display.funcs.color->color_commit_noarm(crtc_state); } void intel_color_commit_arm(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - dev_priv->color_funcs->color_commit_arm(crtc_state); + dev_priv->display.funcs.color->color_commit_arm(crtc_state); } static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state) @@ -1238,15 +1239,15 @@ int intel_color_check(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - return dev_priv->color_funcs->color_check(crtc_state); + return dev_priv->display.funcs.color->color_check(crtc_state); } void intel_color_get_config(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - if (dev_priv->color_funcs->read_luts) - dev_priv->color_funcs->read_luts(crtc_state); + if (dev_priv->display.funcs.color->read_luts) + dev_priv->display.funcs.color->read_luts(crtc_state); } static bool need_plane_update(struct intel_plane *plane, @@ -2225,28 +2226,28 @@ void intel_color_init(struct intel_crtc *crtc) if (HAS_GMCH(dev_priv)) { if (IS_CHERRYVIEW(dev_priv)) { - dev_priv->color_funcs = &chv_color_funcs; + dev_priv->display.funcs.color = &chv_color_funcs; } else if (DISPLAY_VER(dev_priv) >= 4) { - dev_priv->color_funcs = &i965_color_funcs; + dev_priv->display.funcs.color = &i965_color_funcs; } else { - dev_priv->color_funcs = &i9xx_color_funcs; + dev_priv->display.funcs.color = &i9xx_color_funcs; } } else { if (DISPLAY_VER(dev_priv) >= 11) - dev_priv->color_funcs = &icl_color_funcs; + dev_priv->display.funcs.color = &icl_color_funcs; else if (DISPLAY_VER(dev_priv) == 10) - dev_priv->color_funcs = &glk_color_funcs; + dev_priv->display.funcs.color = &glk_color_funcs; else if (DISPLAY_VER(dev_priv) == 9) - dev_priv->color_funcs = &skl_color_funcs; + dev_priv->display.funcs.color = &skl_color_funcs; else if (DISPLAY_VER(dev_priv) == 8) - dev_priv->color_funcs = &bdw_color_funcs; + dev_priv->display.funcs.color = &bdw_color_funcs; else if (DISPLAY_VER(dev_priv) == 7) { if (IS_HASWELL(dev_priv)) - dev_priv->color_funcs = &hsw_color_funcs; + dev_priv->display.funcs.color = &hsw_color_funcs; else - dev_priv->color_funcs = &ivb_color_funcs; + dev_priv->display.funcs.color = &ivb_color_funcs; } else - dev_priv->color_funcs = &ilk_color_funcs; + dev_priv->display.funcs.color = &ilk_color_funcs; } drm_crtc_enable_color_mgmt(&crtc->base, diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c index 1dcc268927a2..6d5cbeb8df4d 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.c +++ b/drivers/gpu/drm/i915/display/intel_connector.c @@ -229,7 +229,7 @@ intel_attach_force_audio_property(struct drm_connector *connector) struct drm_i915_private *dev_priv = to_i915(dev); struct drm_property *prop; - prop = dev_priv->force_audio_property; + prop = dev_priv->display.properties.force_audio; if (prop == NULL) { prop = drm_property_create_enum(dev, 0, "audio", @@ -238,7 +238,7 @@ intel_attach_force_audio_property(struct drm_connector *connector) if (prop == NULL) return; - dev_priv->force_audio_property = prop; + dev_priv->display.properties.force_audio = prop; } 
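
intel_attach_force_audio_property() above (and the broadcast-RGB twin just below) follow a lazy-singleton pattern: the drm_property is created on first use, cached under dev_priv->display.properties, and the drm_object_attach_property() call that follows runs the same way whether the property was just created or reused. The pattern in isolation, with malloc standing in for drm_property_create_enum():

#include <stdio.h>
#include <stdlib.h>

struct prop { const char *name; };

static struct prop *cached_force_audio; /* lives in the device struct in i915 */

static struct prop *get_force_audio_prop(void)
{
	if (!cached_force_audio) {
		cached_force_audio = malloc(sizeof(*cached_force_audio));
		if (!cached_force_audio)
			return NULL; /* mirrors the silent bail on failure */
		cached_force_audio->name = "audio";
	}
	return cached_force_audio;
}

int main(void)
{
	struct prop *a = get_force_audio_prop();
	struct prop *b = get_force_audio_prop();

	printf("same object: %s\n", a == b ? "yes" : "no"); /* yes */
	free(a);
	return 0;
}
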
drm_object_attach_property(&connector->base, prop, 0); } @@ -256,7 +256,7 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector) struct drm_i915_private *dev_priv = to_i915(dev); struct drm_property *prop; - prop = dev_priv->broadcast_rgb_property; + prop = dev_priv->display.properties.broadcast_rgb; if (prop == NULL) { prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "Broadcast RGB", @@ -265,7 +265,7 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector) if (prop == NULL) return; - dev_priv->broadcast_rgb_property = prop; + dev_priv->display.properties.broadcast_rgb = prop; } drm_object_attach_property(&connector->base, prop, 0); diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 6a3893c8ff22..4a8ff2f97608 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -46,6 +46,7 @@ #include "intel_gmbus.h" #include "intel_hotplug.h" #include "intel_pch_display.h" +#include "intel_pch_refclk.h" /* Here's the desired hotplug mode */ #define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \ @@ -444,6 +445,8 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder, /* FDI must always be 2.7 GHz */ pipe_config->port_clock = 135000 * 2; + adjusted_mode->crtc_clock = lpt_iclkip(pipe_config); + return 0; } @@ -643,9 +646,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) struct i2c_adapter *i2c; bool ret = false; - BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG); - - i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin); + i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->display.vbt.crt_ddc_pin); edid = intel_crt_get_edid(connector, i2c); if (edid) { @@ -931,7 +932,7 @@ static int intel_crt_get_modes(struct drm_connector *connector) wakeref = intel_display_power_get(dev_priv, intel_encoder->power_domain); - i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin); + i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->display.vbt.crt_ddc_pin); ret = intel_crt_ddc_get_modes(connector, i2c); if (ret || !IS_G4X(dev_priv)) goto out; @@ -1110,8 +1111,8 @@ void intel_crt_init(struct drm_i915_private *dev_priv) u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT | FDI_RX_LINK_REVERSAL_OVERRIDE; - dev_priv->fdi_rx_config = intel_de_read(dev_priv, - FDI_RX_CTL(PIPE_A)) & fdi_config; + dev_priv->display.fdi.rx_config = intel_de_read(dev_priv, + FDI_RX_CTL(PIPE_A)) & fdi_config; } intel_crt_reset(&crt->base.base); diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c index 4ca6e9493ff2..e9212f69c360 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c +++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c @@ -134,8 +134,8 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state) plane->base.base.id, plane->base.name, fb->base.id, fb->width, fb->height, &fb->format->format, fb->modifier, str_yes_no(plane_state->uapi.visible)); - drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n", - plane_state->hw.rotation, plane_state->scaler_id); + drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d, scaling_filter: %d\n", + plane_state->hw.rotation, plane_state->scaler_id, plane_state->hw.scaling_filter); if (plane_state->uapi.visible) drm_dbg_kms(&i915->drm, "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n", @@ -262,10 +262,11 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, if (DISPLAY_VER(i915) >= 9) 
drm_dbg_kms(&i915->drm, - "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", + "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n", crtc->num_scalers, pipe_config->scaler_state.scaler_users, - pipe_config->scaler_state.scaler_id); + pipe_config->scaler_state.scaler_id, + pipe_config->hw.scaling_filter); if (HAS_GMCH(i915)) drm_dbg_kms(&i915->drm, diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c index 16ac560eab49..87899e89b3a7 100644 --- a/drivers/gpu/drm/i915/display/intel_cursor.c +++ b/drivers/gpu/drm/i915/display/intel_cursor.c @@ -19,9 +19,9 @@ #include "intel_fb.h" #include "intel_fb_pin.h" #include "intel_frontbuffer.h" -#include "intel_pm.h" #include "intel_psr.h" #include "intel_sprite.h" +#include "skl_watermark.h" /* Cursor formats */ static const u32 intel_cursor_formats[] = { diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 2330604b0bcc..643832d55c28 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -57,6 +57,7 @@ #include "intel_lspcon.h" #include "intel_pps.h" #include "intel_psr.h" +#include "intel_quirks.h" #include "intel_snps_phy.h" #include "intel_sprite.h" #include "intel_tc.h" @@ -323,28 +324,6 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv, } } -int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config) -{ - int dotclock; - - if (intel_crtc_has_dp_encoder(pipe_config)) - dotclock = intel_dotclock_calculate(pipe_config->port_clock, - &pipe_config->dp_m_n); - else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24) - dotclock = pipe_config->port_clock * 24 / pipe_config->pipe_bpp; - else - dotclock = pipe_config->port_clock; - - if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && - !intel_crtc_has_dp_encoder(pipe_config)) - dotclock *= 2; - - if (pipe_config->pixel_multiplier) - dotclock /= pipe_config->pixel_multiplier; - - return dotclock; -} - static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) { /* CRT dotclock is determined via other means */ @@ -631,7 +610,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl); - if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME && + if (intel_has_quirk(dev_priv, QUIRK_INCREASE_DDI_DISABLED_TIME) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { drm_dbg_kms(&dev_priv->drm, "Quirk Increase DDI disabled time\n"); @@ -1425,7 +1404,7 @@ hsw_set_signal_levels(struct intel_encoder *encoder, static void _icl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg, u32 clk_sel_mask, u32 clk_sel, u32 clk_off) { - mutex_lock(&i915->dpll.lock); + mutex_lock(&i915->display.dpll.lock); intel_de_rmw(i915, reg, clk_sel_mask, clk_sel); @@ -1435,17 +1414,17 @@ static void _icl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg, */ intel_de_rmw(i915, reg, clk_off, 0); - mutex_unlock(&i915->dpll.lock); + mutex_unlock(&i915->display.dpll.lock); } static void _icl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg, u32 clk_off) { - mutex_lock(&i915->dpll.lock); + mutex_lock(&i915->display.dpll.lock); intel_de_rmw(i915, reg, 0, clk_off); - mutex_unlock(&i915->dpll.lock); + mutex_unlock(&i915->display.dpll.lock); } static bool _icl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t reg, @@ -1720,12 +1699,12 @@ static void 
icl_ddi_tc_enable_clock(struct intel_encoder *encoder, intel_de_write(i915, DDI_CLK_SEL(port), icl_pll_to_ddi_clk_sel(encoder, crtc_state)); - mutex_lock(&i915->dpll.lock); + mutex_lock(&i915->display.dpll.lock); intel_de_rmw(i915, ICL_DPCLKA_CFGCR0, ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port), 0); - mutex_unlock(&i915->dpll.lock); + mutex_unlock(&i915->display.dpll.lock); } static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder) @@ -1734,12 +1713,12 @@ static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder) enum tc_port tc_port = intel_port_to_tc(i915, encoder->port); enum port port = encoder->port; - mutex_lock(&i915->dpll.lock); + mutex_lock(&i915->display.dpll.lock); intel_de_rmw(i915, ICL_DPCLKA_CFGCR0, 0, ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port)); - mutex_unlock(&i915->dpll.lock); + mutex_unlock(&i915->display.dpll.lock); intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE); } @@ -1824,7 +1803,7 @@ static void skl_ddi_enable_clock(struct intel_encoder *encoder, if (drm_WARN_ON(&i915->drm, !pll)) return; - mutex_lock(&i915->dpll.lock); + mutex_lock(&i915->display.dpll.lock); intel_de_rmw(i915, DPLL_CTRL2, DPLL_CTRL2_DDI_CLK_OFF(port) | @@ -1832,7 +1811,7 @@ static void skl_ddi_enable_clock(struct intel_encoder *encoder, DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) | DPLL_CTRL2_DDI_SEL_OVERRIDE(port)); - mutex_unlock(&i915->dpll.lock); + mutex_unlock(&i915->display.dpll.lock); } static void skl_ddi_disable_clock(struct intel_encoder *encoder) @@ -1840,12 +1819,12 @@ static void skl_ddi_disable_clock(struct intel_encoder *encoder) struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum port port = encoder->port; - mutex_lock(&i915->dpll.lock); + mutex_lock(&i915->display.dpll.lock); intel_de_rmw(i915, DPLL_CTRL2, 0, DPLL_CTRL2_DDI_CLK_OFF(port)); - mutex_unlock(&i915->dpll.lock); + mutex_unlock(&i915->display.dpll.lock); } static bool skl_ddi_is_clock_enabled(struct intel_encoder *encoder) @@ -2691,10 +2670,14 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state, dig_port->set_infoframes(encoder, false, old_crtc_state, old_conn_state); - intel_ddi_disable_pipe_clock(old_crtc_state); + if (DISPLAY_VER(dev_priv) < 12) + intel_ddi_disable_pipe_clock(old_crtc_state); intel_disable_ddi_buf(encoder, old_crtc_state); + if (DISPLAY_VER(dev_priv) >= 12) + intel_ddi_disable_pipe_clock(old_crtc_state); + intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain, fetch_and_zero(&dig_port->ddi_io_wakeref)); @@ -2862,6 +2845,8 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_connector *connector = conn_state->connector; enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, port); + u32 buf_ctl; if (!intel_hdmi_handle_sink_scrambling(encoder, connector, crtc_state->hdmi_high_tmds_clock_ratio, @@ -2919,8 +2904,12 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, * On ADL_P the PHY link rate and lane count must be programmed but * these are both 0 for HDMI. 
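The intel_ddi_post_disable_hdmi() hunk above moves intel_ddi_disable_pipe_clock() after intel_disable_ddi_buf() on display version 12 and newer. A standalone sketch of that version-gated teardown ordering (the threshold comes from the hunk; everything else is illustrative):

#include <stdio.h>

static void disable_pipe_clock(void) { puts("pipe clock off"); }
static void disable_ddi_buf(void)    { puts("DDI buffer off"); }

static void post_disable_hdmi(int display_ver)
{
	/* Older platforms: pipe clock off first, then the buffer. */
	if (display_ver < 12)
		disable_pipe_clock();

	disable_ddi_buf();

	/* Gen12+: the buffer must go down before its pipe clock. */
	if (display_ver >= 12)
		disable_pipe_clock();
}

int main(void)
{
	post_disable_hdmi(11);
	post_disable_hdmi(12);
	return 0;
}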
*/ - intel_de_write(dev_priv, DDI_BUF_CTL(port), - dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE); + buf_ctl = dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE; + if (IS_ALDERLAKE_P(dev_priv) && intel_phy_is_tc(dev_priv, phy)) { + drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port)); + buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP; + } + intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl); intel_audio_codec_enable(encoder, crtc_state, conn_state); } @@ -4028,7 +4017,7 @@ intel_ddi_hotplug(struct intel_encoder *encoder, static bool lpt_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin]; + u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin]; return intel_de_read(dev_priv, SDEISR) & bit; } @@ -4036,7 +4025,7 @@ static bool lpt_digital_port_connected(struct intel_encoder *encoder) static bool hsw_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin]; + u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin]; return intel_de_read(dev_priv, DEISR) & bit; } @@ -4044,7 +4033,7 @@ static bool hsw_digital_port_connected(struct intel_encoder *encoder) static bool bdw_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin]; + u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin]; return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit; } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index f4f7c3414762..accf5311b664 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -91,6 +91,7 @@ #include "intel_dmc.h" #include "intel_dp_link_training.h" #include "intel_dpt.h" +#include "intel_dsb.h" #include "intel_fbc.h" #include "intel_fbdev.h" #include "intel_fdi.h" @@ -117,6 +118,7 @@ #include "i9xx_plane.h" #include "skl_scaler.h" #include "skl_universal_plane.h" +#include "skl_watermark.h" #include "vlv_dsi.h" #include "vlv_dsi_pll.h" #include "vlv_dsi_regs.h" @@ -163,16 +165,16 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state); */ void intel_update_watermarks(struct drm_i915_private *dev_priv) { - if (dev_priv->wm_disp->update_wm) - dev_priv->wm_disp->update_wm(dev_priv); + if (dev_priv->display.funcs.wm->update_wm) + dev_priv->display.funcs.wm->update_wm(dev_priv); } static int intel_compute_pipe_wm(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (dev_priv->wm_disp->compute_pipe_wm) - return dev_priv->wm_disp->compute_pipe_wm(state, crtc); + if (dev_priv->display.funcs.wm->compute_pipe_wm) + return dev_priv->display.funcs.wm->compute_pipe_wm(state, crtc); return 0; } @@ -180,20 +182,20 @@ static int intel_compute_intermediate_wm(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (!dev_priv->wm_disp->compute_intermediate_wm) + if (!dev_priv->display.funcs.wm->compute_intermediate_wm) return 0; if (drm_WARN_ON(&dev_priv->drm, - !dev_priv->wm_disp->compute_pipe_wm)) + !dev_priv->display.funcs.wm->compute_pipe_wm)) return 0; - return dev_priv->wm_disp->compute_intermediate_wm(state, crtc); + return 
dev_priv->display.funcs.wm->compute_intermediate_wm(state, crtc); } static bool intel_initial_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (dev_priv->wm_disp->initial_watermarks) { - dev_priv->wm_disp->initial_watermarks(state, crtc); + if (dev_priv->display.funcs.wm->initial_watermarks) { + dev_priv->display.funcs.wm->initial_watermarks(state, crtc); return true; } return false; @@ -203,23 +205,23 @@ static void intel_atomic_update_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (dev_priv->wm_disp->atomic_update_watermarks) - dev_priv->wm_disp->atomic_update_watermarks(state, crtc); + if (dev_priv->display.funcs.wm->atomic_update_watermarks) + dev_priv->display.funcs.wm->atomic_update_watermarks(state, crtc); } static void intel_optimize_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (dev_priv->wm_disp->optimize_watermarks) - dev_priv->wm_disp->optimize_watermarks(state, crtc); + if (dev_priv->display.funcs.wm->optimize_watermarks) + dev_priv->display.funcs.wm->optimize_watermarks(state, crtc); } static int intel_compute_global_watermarks(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (dev_priv->wm_disp->compute_global_watermarks) - return dev_priv->wm_disp->compute_global_watermarks(state); + if (dev_priv->display.funcs.wm->compute_global_watermarks) + return dev_priv->display.funcs.wm->compute_global_watermarks(state); return 0; } @@ -618,7 +620,10 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state) if (!IS_I830(dev_priv)) val &= ~PIPECONF_ENABLE; - if (DISPLAY_VER(dev_priv) >= 12) + if (DISPLAY_VER(dev_priv) >= 14) + intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), + FECSTALL_DIS_DPTSTREAM_DPTTG, 0); + else if (DISPLAY_VER(dev_priv) >= 12) intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), FECSTALL_DIS_DPTSTREAM_DPTTG, 0); @@ -1486,7 +1491,7 @@ static void intel_encoders_update_prepare(struct intel_atomic_state *state) * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits. * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook. */ - if (i915->dpll.mgr) { + if (i915->display.dpll.mgr) { for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (intel_crtc_needs_modeset(new_crtc_state)) continue; @@ -1838,7 +1843,9 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder); + enum transcoder transcoder = crtc_state->cpu_transcoder; + i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) : + CHICKEN_TRANS(transcoder); u32 val; val = intel_de_read(dev_priv, reg); @@ -2080,22 +2087,20 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) { if (phy == PHY_NONE) return false; - else if (IS_DG2(dev_priv)) - /* - * DG2 outputs labelled as "combo PHY" in the bspec use - * SNPS PHYs with completely different programming, - * hence we always return false here. 
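The CHICKEN_TRANS hunks above (intel_disable_transcoder() and hsw_set_frame_start_delay()) pick the register by display version: MTL_CHICKEN_TRANS for version 14+, CHICKEN_TRANS otherwise. A hedged sketch of that pick-the-register-by-version idiom (the offsets below are made up):

#include <stdint.h>
#include <stdio.h>

#define CHICKEN_TRANS_BASE	0x70000u	/* illustrative offsets only */
#define MTL_CHICKEN_TRANS_BASE	0x60000u

static uint32_t chicken_trans_reg(int display_ver, int transcoder)
{
	uint32_t base = display_ver >= 14 ? MTL_CHICKEN_TRANS_BASE
					  : CHICKEN_TRANS_BASE;

	return base + 0x100u * (uint32_t)transcoder;
}

int main(void)
{
	printf("ver 12, trans 0: %#x\n", chicken_trans_reg(12, 0));
	printf("ver 14, trans 0: %#x\n", chicken_trans_reg(14, 0));
	return 0;
}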
- */ - return false; else if (IS_ALDERLAKE_S(dev_priv)) return phy <= PHY_E; else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) return phy <= PHY_D; else if (IS_JSL_EHL(dev_priv)) return phy <= PHY_C; - else if (DISPLAY_VER(dev_priv) >= 11) + else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12)) return phy <= PHY_B; else + /* + * DG2 outputs labelled as "combo PHY" in the bspec use + * SNPS PHYs with completely different programming, + * hence we always return false here. + */ return false; } @@ -2401,7 +2406,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state, if (DISPLAY_VER(dev_priv) != 2) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); - if (!dev_priv->wm_disp->initial_watermarks) + if (!dev_priv->display.funcs.wm->initial_watermarks) intel_update_watermarks(dev_priv); /* clock the pipe down to 640x480@60 to potentially save power */ @@ -2660,7 +2665,7 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state) intel_mode_from_crtc_timings(pipe_mode, pipe_mode); if (DISPLAY_VER(i915) < 4) { - clock_limit = i915->max_cdclk_freq * 9 / 10; + clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10; /* * Enable double wide mode when the dot clock @@ -2692,6 +2697,10 @@ static int intel_crtc_compute_config(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); int ret; + ret = intel_dpll_crtc_compute_clock(state, crtc); + if (ret) + return ret; + ret = intel_crtc_compute_pipe_src(crtc_state); if (ret) return ret; @@ -2718,19 +2727,11 @@ intel_reduce_m_n_ratio(u32 *num, u32 *den) } } -static void compute_m_n(unsigned int m, unsigned int n, - u32 *ret_m, u32 *ret_n, - bool constant_n) +static void compute_m_n(u32 *ret_m, u32 *ret_n, + u32 m, u32 n, u32 constant_n) { - /* - * Several DP dongles in particular seem to be fussy about - * too large link M/N values. Give N value as 0x8000 that - * should be acceptable by specific devices. 0x8000 is the - * specified fixed N value for asynchronous clock mode, - * which the devices expect also in synchronous clock mode. - */ if (constant_n) - *ret_n = DP_LINK_CONSTANT_N_VALUE; + *ret_n = constant_n; else *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); @@ -2742,22 +2743,28 @@ void intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, int pixel_clock, int link_clock, struct intel_link_m_n *m_n, - bool constant_n, bool fec_enable) + bool fec_enable) { u32 data_clock = bits_per_pixel * pixel_clock; if (fec_enable) data_clock = intel_dp_mode_to_fec_clock(data_clock); + /* + * Windows/BIOS uses fixed M/N values always. Follow suit. + * + * Also several DP dongles in particular seem to be fussy + * about too large link M/N values. Presumably the 20bit + * value used by Windows/BIOS is acceptable to everyone. 
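The intel_phy_is_combo() rework here turns the old version check into an explicit per-platform ceiling, with DG2 falling through to false. A compilable sketch of the same mapping (enum values and platform names are simplified stand-ins):

#include <stdbool.h>
#include <stdio.h>

enum phy { PHY_NONE = -1, PHY_A, PHY_B, PHY_C, PHY_D, PHY_E };
enum platform { ADLS, DG1, RKL, JSL_EHL, ADLP, ICL_TGL, DG2 };

static bool phy_is_combo(enum platform p, enum phy phy)
{
	if (phy == PHY_NONE)
		return false;
	else if (p == ADLS)
		return phy <= PHY_E;
	else if (p == DG1 || p == RKL)
		return phy <= PHY_D;
	else if (p == JSL_EHL)
		return phy <= PHY_C;
	else if (p == ADLP || p == ICL_TGL)
		return phy <= PHY_B;
	else
		return false;	/* DG2 "combo" outputs are SNPS PHYs */
}

int main(void)
{
	printf("ADL-S PHY_E combo: %d\n", phy_is_combo(ADLS, PHY_E));
	printf("DG2   PHY_A combo: %d\n", phy_is_combo(DG2, PHY_A));
	return 0;
}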
+ */ m_n->tu = 64; - compute_m_n(data_clock, - link_clock * nlanes * 8, - &m_n->data_m, &m_n->data_n, - constant_n); + compute_m_n(&m_n->data_m, &m_n->data_n, + data_clock, link_clock * nlanes * 8, + 0x8000000); - compute_m_n(pixel_clock, link_clock, - &m_n->link_m, &m_n->link_n, - constant_n); + compute_m_n(&m_n->link_m, &m_n->link_n, + pixel_clock, link_clock, + 0x80000); } static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) @@ -2773,12 +2780,12 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) PCH_DREF_CONTROL) & DREF_SSC1_ENABLE; - if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { + if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) { drm_dbg_kms(&dev_priv->drm, "SSC %s by BIOS, overriding VBT which says %s\n", str_enabled_disabled(bios_lvds_use_ssc), - str_enabled_disabled(dev_priv->vbt.lvds_use_ssc)); - dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; + str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc)); + dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc; } } } @@ -4126,7 +4133,9 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, } if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { - tmp = intel_de_read(dev_priv, CHICKEN_TRANS(pipe_config->cpu_transcoder)); + tmp = intel_de_read(dev_priv, DISPLAY_VER(dev_priv) >= 14 ? + MTL_CHICKEN_TRANS(pipe_config->cpu_transcoder) : + CHICKEN_TRANS(pipe_config->cpu_transcoder)); pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1; } else { @@ -4145,7 +4154,7 @@ bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); - if (!i915->display->get_pipe_config(crtc, crtc_state)) + if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state)) return false; crtc_state->hw.active = true; @@ -4374,7 +4383,7 @@ static int i9xx_pll_refclk(struct drm_device *dev, u32 dpll = pipe_config->dpll_hw_state.dpll; if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) - return dev_priv->vbt.lvds_ssc_freq; + return dev_priv->display.vbt.lvds_ssc_freq; else if (HAS_PCH_SPLIT(dev_priv)) return 120000; else if (DISPLAY_VER(dev_priv) != 2) @@ -4492,7 +4501,31 @@ int intel_dotclock_calculate(int link_freq, if (!m_n->link_n) return 0; - return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n); + return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq), + m_n->link_n); +} + +int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config) +{ + int dotclock; + + if (intel_crtc_has_dp_encoder(pipe_config)) + dotclock = intel_dotclock_calculate(pipe_config->port_clock, + &pipe_config->dp_m_n); + else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24) + dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24, + pipe_config->pipe_bpp); + else + dotclock = pipe_config->port_clock; + + if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && + !intel_crtc_has_dp_encoder(pipe_config)) + dotclock *= 2; + + if (pipe_config->pixel_multiplier) + dotclock /= pipe_config->pixel_multiplier; + + return dotclock; } /* Returns the currently programmed mode of the given encoder. 
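To make the fixed-N change above concrete, here is a userspace sketch of the new compute_m_n() path. The fixed N values (0x8000000 for data M/N, 0x80000 for link M/N) and the 24-bit register limit mirror the hunk; the non-constant-N fallback is dropped, and the example numbers (4-lane HBR2 link, 24bpp, 1080p60) are mine:

#include <stdint.h>
#include <stdio.h>

#define DATA_LINK_M_N_MASK 0xffffffu	/* M/N registers hold 24 bits */

static void reduce_m_n_ratio(uint32_t *num, uint32_t *den)
{
	while (*num > DATA_LINK_M_N_MASK || *den > DATA_LINK_M_N_MASK) {
		*num >>= 1;
		*den >>= 1;
	}
}

static void compute_m_n(uint32_t *ret_m, uint32_t *ret_n,
			uint32_t m, uint32_t n, uint32_t constant_n)
{
	*ret_n = constant_n;	/* fixed N, as Windows/BIOS program it */
	*ret_m = (uint32_t)((uint64_t)m * *ret_n / n);
	reduce_m_n_ratio(ret_m, ret_n);
}

int main(void)
{
	uint32_t pixel_clock = 148500;	/* kHz, 1080p60 */
	uint32_t link_clock = 270000;	/* kHz, HBR2 */
	uint32_t nlanes = 4, bpp = 24;
	uint32_t data_m, data_n, link_m, link_n;

	compute_m_n(&data_m, &data_n, bpp * pixel_clock,
		    link_clock * nlanes * 8, 0x8000000);
	compute_m_n(&link_m, &link_n, pixel_clock, link_clock, 0x80000);

	printf("data M/N = %u/%u, link M/N = %u/%u\n",
	       data_m, data_n, link_m, link_n);
	return 0;
}

For these inputs the sketch yields data M/N = 3460300/8388608 and link M/N = 288358/524288, both exactly representable in the 24-bit registers.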
*/ @@ -4753,7 +4786,7 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state) /* Display WA #1135: BXT:ALL GLK:ALL */ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && - dev_priv->ipc_enabled) + skl_watermark_ipc_enabled(dev_priv)) linetime_wm /= 2; return min(linetime_wm, 0x1ff); @@ -4799,10 +4832,6 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, crtc_state->update_wm_post = true; if (mode_changed) { - ret = intel_dpll_crtc_compute_clock(state, crtc); - if (ret) - return ret; - ret = intel_dpll_crtc_get_shared_dpll(state, crtc); if (ret) return ret; @@ -5367,46 +5396,14 @@ bool intel_fuzzy_clock_check(int clock1, int clock2) } static bool -intel_compare_m_n(unsigned int m, unsigned int n, - unsigned int m2, unsigned int n2, - bool exact) -{ - if (m == m2 && n == n2) - return true; - - if (exact || !m || !n || !m2 || !n2) - return false; - - BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX); - - if (n > n2) { - while (n > n2) { - m2 <<= 1; - n2 <<= 1; - } - } else if (n < n2) { - while (n < n2) { - m <<= 1; - n <<= 1; - } - } - - if (n != n2) - return false; - - return intel_fuzzy_clock_check(m, m2); -} - -static bool intel_compare_link_m_n(const struct intel_link_m_n *m_n, - const struct intel_link_m_n *m2_n2, - bool exact) + const struct intel_link_m_n *m2_n2) { return m_n->tu == m2_n2->tu && - intel_compare_m_n(m_n->data_m, m_n->data_n, - m2_n2->data_m, m2_n2->data_n, exact) && - intel_compare_m_n(m_n->link_m, m_n->link_n, - m2_n2->link_m, m2_n2->link_n, exact); + m_n->data_m == m2_n2->data_m && + m_n->data_n == m2_n2->data_n && + m_n->link_m == m2_n2->link_m && + m_n->link_n == m2_n2->link_n; } static bool @@ -5600,8 +5597,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #define PIPE_CONF_CHECK_M_N(name) do { \ if (!intel_compare_link_m_n(¤t_config->name, \ - &pipe_config->name,\ - !fastset)) { \ + &pipe_config->name)) { \ pipe_config_mismatch(fastset, crtc, __stringify(name), \ "(expected tu %i data %i/%i link %i/%i, " \ "found tu %i, data %i/%i link %i/%i)", \ @@ -5648,9 +5644,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, */ #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \ if (!intel_compare_link_m_n(¤t_config->name, \ - &pipe_config->name, !fastset) && \ + &pipe_config->name) && \ !intel_compare_link_m_n(¤t_config->alt_name, \ - &pipe_config->name, !fastset)) { \ + &pipe_config->name)) { \ pipe_config_mismatch(fastset, crtc, __stringify(name), \ "(expected tu %i data %i/%i link %i/%i, " \ "or tu %i data %i/%i link %i/%i, " \ @@ -5685,16 +5681,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, } \ } while (0) -#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \ - if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ - pipe_config_mismatch(fastset, crtc, __stringify(name), \ - "(expected %i, found %i)", \ - current_config->name, \ - pipe_config->name); \ - ret = false; \ - } \ -} while (0) - #define PIPE_CONF_CHECK_INFOFRAME(name) do { \ if (!intel_compare_infoframe(¤t_config->infoframes.name, \ &pipe_config->infoframes.name)) { \ @@ -5750,8 +5736,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_I(lane_count); PIPE_CONF_CHECK_X(lane_lat_optim_mask); - if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) { - PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); + if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) { + if (!fastset || !pipe_config->seamless_m_n) + PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); } 
else { PIPE_CONF_CHECK_M_N(dp_m_n); PIPE_CONF_CHECK_M_N(dp_m2_n2); @@ -5813,7 +5800,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_RECT(pch_pfit.dst); PIPE_CONF_CHECK_I(scaler_state.scaler_id); - PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); + PIPE_CONF_CHECK_I(pixel_rate); PIPE_CONF_CHECK_X(gamma_mode); if (IS_CHERRYVIEW(dev_priv)) @@ -5840,7 +5827,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_BOOL(double_wide); - if (dev_priv->dpll.mgr) { + if (dev_priv->display.dpll.mgr) { PIPE_CONF_CHECK_P(shared_dpll); PIPE_CONF_CHECK_X(dpll_hw_state.dpll); @@ -5883,9 +5870,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) PIPE_CONF_CHECK_I(pipe_bpp); - PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock); - PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock); - PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); + if (!fastset || !pipe_config->seamless_m_n) { + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock); + } + PIPE_CONF_CHECK_I(port_clock); PIPE_CONF_CHECK_I(min_voltage_level); @@ -5927,7 +5916,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE #undef PIPE_CONF_CHECK_P #undef PIPE_CONF_CHECK_FLAGS -#undef PIPE_CONF_CHECK_CLOCK_FUZZY #undef PIPE_CONF_CHECK_COLOR_LUT #undef PIPE_CONF_CHECK_TIMINGS #undef PIPE_CONF_CHECK_RECT @@ -6049,20 +6037,6 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state) } } -static void intel_modeset_clear_plls(struct intel_atomic_state *state) -{ - struct intel_crtc_state *new_crtc_state; - struct intel_crtc *crtc; - int i; - - for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { - if (!intel_crtc_needs_modeset(new_crtc_state)) - continue; - - intel_release_shared_dplls(state, crtc); - } -} - /* * This implements the workaround described in the "notes" section of the mode * set sequence documentation. When going from no pipes or single pipe to @@ -6163,23 +6137,6 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta new_crtc_state->update_pipe = true; } -static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state, - struct intel_crtc_state *new_crtc_state) -{ - /* - * If we're not doing the full modeset we want to - * keep the current M/N values as they may be - * sufficiently different to the computed values - * to cause problems. 
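With N fixed at compute time, the values read back from the hardware match the computed state bit for bit, which is what lets the fuzzy intel_compare_m_n() above be deleted and the M/N copying in intel_crtc_copy_fastset() below go away. A sketch of the exact comparison that replaces it (struct layout mirrors the diff; the sample values reuse the numbers from the previous sketch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct link_m_n {
	uint32_t tu;
	uint32_t data_m, data_n;
	uint32_t link_m, link_n;
};

static bool compare_link_m_n(const struct link_m_n *a,
			     const struct link_m_n *b)
{
	return a->tu == b->tu &&
	       a->data_m == b->data_m && a->data_n == b->data_n &&
	       a->link_m == b->link_m && a->link_n == b->link_n;
}

int main(void)
{
	struct link_m_n sw = { 64, 3460300, 8388608, 288358, 524288 };
	struct link_m_n hw = sw;	/* pretend hardware readout */

	printf("match: %s\n", compare_link_m_n(&sw, &hw) ? "yes" : "no");
	return 0;
}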
- * - * FIXME: should really copy more fuzzy state here - */ - new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n; - new_crtc_state->dp_m_n = old_crtc_state->dp_m_n; - new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2; - new_crtc_state->has_drrs = old_crtc_state->has_drrs; -} - static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state, struct intel_crtc *crtc, u8 plane_ids_mask) @@ -6836,9 +6793,11 @@ static int intel_atomic_check(struct drm_device *dev, if (!intel_crtc_needs_modeset(new_crtc_state)) continue; - ret = intel_modeset_pipe_config_late(state, crtc); - if (ret) - goto fail; + if (new_crtc_state->hw.enable) { + ret = intel_modeset_pipe_config_late(state, crtc); + if (ret) + goto fail; + } intel_crtc_check_fastset(old_crtc_state, new_crtc_state); } @@ -6889,15 +6848,12 @@ static int intel_atomic_check(struct drm_device *dev, for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (intel_crtc_needs_modeset(new_crtc_state)) { - any_ms = true; + if (!intel_crtc_needs_modeset(new_crtc_state)) continue; - } - if (!new_crtc_state->update_pipe) - continue; + any_ms = true; - intel_crtc_copy_fastset(old_crtc_state, new_crtc_state); + intel_release_shared_dplls(state, crtc); } if (any_ms && !check_digital_port_conflicts(state)) { @@ -6938,8 +6894,6 @@ static int intel_atomic_check(struct drm_device *dev, ret = intel_modeset_calc_cdclk(state); if (ret) return ret; - - intel_modeset_clear_plls(state); } ret = intel_atomic_check_crtcs(state); @@ -7058,6 +7012,10 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) hsw_set_linetime_wm(new_crtc_state); + + if (new_crtc_state->seamless_m_n) + intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder, + &new_crtc_state->dp_m_n); } static void commit_pipe_pre_planes(struct intel_atomic_state *state, @@ -7120,7 +7078,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state, intel_crtc_update_active_timings(new_crtc_state); - dev_priv->display->crtc_enable(state, crtc); + dev_priv->display.funcs.display->crtc_enable(state, crtc); if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) return; @@ -7199,7 +7157,7 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state, */ intel_crtc_disable_pipe_crc(crtc); - dev_priv->display->crtc_disable(state, crtc); + dev_priv->display.funcs.display->crtc_disable(state, crtc); crtc->active = false; intel_fbc_disable(crtc); intel_disable_shared_dpll(old_crtc_state); @@ -7410,7 +7368,7 @@ static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) struct intel_atomic_state *state, *next; struct llist_node *freed; - freed = llist_del_all(&dev_priv->atomic_helper.free_list); + freed = llist_del_all(&dev_priv->display.atomic_helper.free_list); llist_for_each_entry_safe(state, next, freed, freed) drm_atomic_state_put(&state->base); } @@ -7418,7 +7376,7 @@ static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) static void intel_atomic_helper_free_state_worker(struct work_struct *work) { struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), atomic_helper.free_work); + container_of(work, typeof(*dev_priv), display.atomic_helper.free_work); intel_atomic_helper_free_state(dev_priv); } @@ -7588,7 +7546,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) } /* Now enable the clocks, plane, pipe, and connectors that we set up. 
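The atomic_helper hunks below move the deferred-free machinery under display.atomic_helper: commit fences push finished states onto a lock-free llist and a worker later detaches the whole batch and frees it. A single-threaded userspace model of that shape (the kernel's llist is lock-free and the worker runs on a workqueue; this sketch models neither, only the push/detach/drain structure):

#include <stdio.h>
#include <stdlib.h>

struct state {
	struct state *next;	/* models struct llist_node */
	int id;
};

static struct state *free_list;	/* models atomic_helper.free_list */

static void defer_free(struct state *s)
{
	s->next = free_list;	/* llist_add() in the driver */
	free_list = s;
}

static void free_worker(void)
{
	struct state *s = free_list;	/* llist_del_all(): detach all */

	free_list = NULL;
	while (s) {
		struct state *next = s->next;

		printf("freeing state %d\n", s->id);
		free(s);	/* drm_atomic_state_put() in the driver */
		s = next;
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct state *s = malloc(sizeof(*s));

		s->id = i;
		defer_free(s);
	}
	free_worker();
	return 0;
}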
*/ - dev_priv->display->commit_modeset_enables(state); + dev_priv->display.funcs.display->commit_modeset_enables(state); intel_encoders_update_complete(state); @@ -7711,7 +7669,7 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence, case FENCE_FREE: { struct intel_atomic_helper *helper = - &to_i915(state->base.dev)->atomic_helper; + &to_i915(state->base.dev)->display.atomic_helper; if (llist_add(&state->freed, &helper->free_list)) schedule_work(&helper->free_work); @@ -7814,12 +7772,12 @@ static int intel_atomic_commit(struct drm_device *dev, i915_sw_fence_commit(&state->commit_ready); if (nonblock && state->modeset) { - queue_work(dev_priv->modeset_wq, &state->base.commit_work); + queue_work(dev_priv->display.wq.modeset, &state->base.commit_work); } else if (nonblock) { - queue_work(dev_priv->flip_wq, &state->base.commit_work); + queue_work(dev_priv->display.wq.flip, &state->base.commit_work); } else { if (state->modeset) - flush_workqueue(dev_priv->modeset_wq); + flush_workqueue(dev_priv->display.wq.modeset); intel_atomic_commit_tail(state); } @@ -7925,7 +7883,7 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv) if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) return false; - if (!dev_priv->vbt.int_crt_support) + if (!dev_priv->display.vbt.int_crt_support) return false; return true; @@ -8060,7 +8018,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { bool has_edp, has_port; - if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support) + if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support) intel_crt_init(dev_priv); /* @@ -8319,7 +8277,7 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { .atomic_state_free = intel_atomic_state_free, }; -static const struct drm_i915_display_funcs skl_display_funcs = { +static const struct intel_display_funcs skl_display_funcs = { .get_pipe_config = hsw_get_pipe_config, .crtc_enable = hsw_crtc_enable, .crtc_disable = hsw_crtc_disable, @@ -8327,7 +8285,7 @@ static const struct drm_i915_display_funcs skl_display_funcs = { .get_initial_plane_config = skl_get_initial_plane_config, }; -static const struct drm_i915_display_funcs ddi_display_funcs = { +static const struct intel_display_funcs ddi_display_funcs = { .get_pipe_config = hsw_get_pipe_config, .crtc_enable = hsw_crtc_enable, .crtc_disable = hsw_crtc_disable, @@ -8335,7 +8293,7 @@ static const struct drm_i915_display_funcs ddi_display_funcs = { .get_initial_plane_config = i9xx_get_initial_plane_config, }; -static const struct drm_i915_display_funcs pch_split_display_funcs = { +static const struct intel_display_funcs pch_split_display_funcs = { .get_pipe_config = ilk_get_pipe_config, .crtc_enable = ilk_crtc_enable, .crtc_disable = ilk_crtc_disable, @@ -8343,7 +8301,7 @@ static const struct drm_i915_display_funcs pch_split_display_funcs = { .get_initial_plane_config = i9xx_get_initial_plane_config, }; -static const struct drm_i915_display_funcs vlv_display_funcs = { +static const struct intel_display_funcs vlv_display_funcs = { .get_pipe_config = i9xx_get_pipe_config, .crtc_enable = valleyview_crtc_enable, .crtc_disable = i9xx_crtc_disable, @@ -8351,7 +8309,7 @@ static const struct drm_i915_display_funcs vlv_display_funcs = { .get_initial_plane_config = i9xx_get_initial_plane_config, }; -static const struct drm_i915_display_funcs i9xx_display_funcs = { +static const struct intel_display_funcs i9xx_display_funcs = { .get_pipe_config = 
i9xx_get_pipe_config, .crtc_enable = i9xx_crtc_enable, .crtc_disable = i9xx_crtc_disable, @@ -8374,16 +8332,16 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) intel_dpll_init_clock_hook(dev_priv); if (DISPLAY_VER(dev_priv) >= 9) { - dev_priv->display = &skl_display_funcs; + dev_priv->display.funcs.display = &skl_display_funcs; } else if (HAS_DDI(dev_priv)) { - dev_priv->display = &ddi_display_funcs; + dev_priv->display.funcs.display = &ddi_display_funcs; } else if (HAS_PCH_SPLIT(dev_priv)) { - dev_priv->display = &pch_split_display_funcs; + dev_priv->display.funcs.display = &pch_split_display_funcs; } else if (IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv)) { - dev_priv->display = &vlv_display_funcs; + dev_priv->display.funcs.display = &vlv_display_funcs; } else { - dev_priv->display = &i9xx_display_funcs; + dev_priv->display.funcs.display = &i9xx_display_funcs; } intel_fdi_init_hook(dev_priv); @@ -8396,11 +8354,11 @@ void intel_modeset_init_hw(struct drm_i915_private *i915) if (!HAS_DISPLAY(i915)) return; - cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state); + cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state); intel_update_cdclk(i915); - intel_cdclk_dump_config(i915, &i915->cdclk.hw, "Current CDCLK"); - cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw; + intel_cdclk_dump_config(i915, &i915->display.cdclk.hw, "Current CDCLK"); + cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw; } static int sanitize_watermarks_add_affected(struct drm_atomic_state *state) @@ -8456,7 +8414,7 @@ static void sanitize_watermarks(struct drm_i915_private *dev_priv) int i; /* Only supported on platforms that use atomic watermark design */ - if (!dev_priv->wm_disp->optimize_watermarks) + if (!dev_priv->display.funcs.wm->optimize_watermarks) return; state = drm_atomic_state_alloc(&dev_priv->drm); @@ -8688,11 +8646,9 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915) intel_dmc_ucode_init(i915); - i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); - i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI | - WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); - - i915->window2_delay = 0; /* No DSB so no window2 delay */ + i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0); + i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI | + WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); intel_mode_config_init(i915); @@ -8708,8 +8664,8 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915) if (ret) goto cleanup_vga_client_pw_domain_dmc; - init_llist_head(&i915->atomic_helper.free_list); - INIT_WORK(&i915->atomic_helper.free_work, + init_llist_head(&i915->display.atomic_helper.free_list); + INIT_WORK(&i915->display.atomic_helper.free_work, intel_atomic_helper_free_state_worker); intel_init_quirks(i915); @@ -8769,7 +8725,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915) intel_hdcp_component_init(i915); - if (i915->max_cdclk_freq == 0) + if (i915->display.cdclk.max_cdclk_freq == 0) intel_update_max_cdclk(i915); /* @@ -8833,7 +8789,7 @@ int intel_modeset_init(struct drm_i915_private *i915) intel_hpd_init(i915); intel_hpd_poll_disable(i915); - intel_init_ipc(i915); + skl_watermark_ipc_init(i915); return 0; } @@ -8964,7 +8920,7 @@ void intel_display_resume(struct drm_device *dev) if (!ret) ret = __intel_display_resume(i915, state, &ctx); - intel_enable_ipc(i915); + skl_watermark_ipc_update(i915); drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); @@ -8999,11 +8955,18 @@ void 
intel_modeset_driver_remove(struct drm_i915_private *i915) if (!HAS_DISPLAY(i915)) return; - flush_workqueue(i915->flip_wq); - flush_workqueue(i915->modeset_wq); + flush_workqueue(i915->display.wq.flip); + flush_workqueue(i915->display.wq.modeset); + + flush_work(&i915->display.atomic_helper.free_work); + drm_WARN_ON(&i915->drm, !llist_empty(&i915->display.atomic_helper.free_list)); - flush_work(&i915->atomic_helper.free_work); - drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list)); + /* + * MST topology needs to be suspended so we don't have any calls to + * fbdev after it's finalized. MST will be destroyed later as part of + * drm_mode_config_cleanup() + */ + intel_dp_mst_suspend(i915); } /* part #2: call after irq uninstall */ @@ -9018,13 +8981,6 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915) */ intel_hpd_poll_fini(i915); - /* - * MST topology needs to be suspended so we don't have any calls to - * fbdev after it's finalized. MST will be destroyed later as part of - * drm_mode_config_cleanup() - */ - intel_dp_mst_suspend(i915); - /* poll work can call into fbdev, hence clean that up afterwards */ intel_fbdev_fini(i915); @@ -9041,8 +8997,8 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915) intel_gmbus_teardown(i915); - destroy_workqueue(i915->flip_wq); - destroy_workqueue(i915->modeset_wq); + destroy_workqueue(i915->display.wq.flip); + destroy_workqueue(i915->display.wq.modeset); intel_fbc_cleanup(i915); } diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index cef251025d7a..884e8e67b17c 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -375,7 +375,7 @@ enum hpd_pin { #define for_each_pipe(__dev_priv, __p) \ for ((__p) = 0; (__p) < I915_MAX_PIPES; (__p)++) \ - for_each_if(INTEL_INFO(__dev_priv)->display.pipe_mask & BIT(__p)) + for_each_if(RUNTIME_INFO(__dev_priv)->pipe_mask & BIT(__p)) #define for_each_pipe_masked(__dev_priv, __p, __mask) \ for_each_pipe(__dev_priv, __p) \ @@ -383,7 +383,7 @@ enum hpd_pin { #define for_each_cpu_transcoder(__dev_priv, __t) \ for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++) \ - for_each_if (INTEL_INFO(__dev_priv)->display.cpu_transcoder_mask & BIT(__t)) + for_each_if (RUNTIME_INFO(__dev_priv)->cpu_transcoder_mask & BIT(__t)) #define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \ for_each_cpu_transcoder(__dev_priv, __t) \ @@ -547,7 +547,7 @@ u8 intel_calc_active_pipes(struct intel_atomic_state *state, void intel_link_compute_m_n(u16 bpp, int nlanes, int pixel_clock, int link_clock, struct intel_link_m_n *m_n, - bool constant_n, bool fec_enable); + bool fec_enable); u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, u32 pixel_format, u64 modifier); enum drm_mode_status diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h new file mode 100644 index 000000000000..96cf994b0ad1 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -0,0 +1,418 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_DISPLAY_CORE_H__ +#define __INTEL_DISPLAY_CORE_H__ + +#include <linux/list.h> +#include <linux/llist.h> +#include <linux/mutex.h> +#include <linux/types.h> +#include <linux/wait.h> +#include <linux/workqueue.h> + +#include <drm/drm_connector.h> + +#include "intel_cdclk.h" +#include "intel_display.h" +#include 
"intel_display_power.h" +#include "intel_dmc.h" +#include "intel_dpll_mgr.h" +#include "intel_fbc.h" +#include "intel_global_state.h" +#include "intel_gmbus.h" +#include "intel_opregion.h" +#include "intel_pm_types.h" + +struct drm_i915_private; +struct drm_property; +struct i915_audio_component; +struct i915_hdcp_comp_master; +struct intel_atomic_state; +struct intel_audio_funcs; +struct intel_bios_encoder_data; +struct intel_cdclk_funcs; +struct intel_cdclk_vals; +struct intel_color_funcs; +struct intel_crtc; +struct intel_crtc_state; +struct intel_dpll_funcs; +struct intel_dpll_mgr; +struct intel_fbdev; +struct intel_fdi_funcs; +struct intel_hotplug_funcs; +struct intel_initial_plane_config; +struct intel_overlay; + +/* Amount of SAGV/QGV points, BSpec precisely defines this */ +#define I915_NUM_QGV_POINTS 8 + +/* Amount of PSF GV points, BSpec precisely defines this */ +#define I915_NUM_PSF_GV_POINTS 3 + +struct intel_display_funcs { + /* + * Returns the active state of the crtc, and if the crtc is active, + * fills out the pipe-config with the hw state. + */ + bool (*get_pipe_config)(struct intel_crtc *, + struct intel_crtc_state *); + void (*get_initial_plane_config)(struct intel_crtc *, + struct intel_initial_plane_config *); + void (*crtc_enable)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + void (*crtc_disable)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + void (*commit_modeset_enables)(struct intel_atomic_state *state); +}; + +/* functions used for watermark calcs for display. */ +struct intel_wm_funcs { + /* update_wm is for legacy wm management */ + void (*update_wm)(struct drm_i915_private *dev_priv); + int (*compute_pipe_wm)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + int (*compute_intermediate_wm)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + void (*initial_watermarks)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + void (*atomic_update_watermarks)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + void (*optimize_watermarks)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + int (*compute_global_watermarks)(struct intel_atomic_state *state); +}; + +struct intel_audio { + /* hda/i915 audio component */ + struct i915_audio_component *component; + bool component_registered; + /* mutex for audio/video sync */ + struct mutex mutex; + int power_refcount; + u32 freq_cntrl; + + /* Used to save the pipe-to-encoder mapping for audio */ + struct intel_encoder *encoder_map[I915_MAX_PIPES]; + + /* necessary resource sharing with HDMI LPE audio driver. */ + struct { + struct platform_device *platdev; + int irq; + } lpe; +}; + +/* + * dpll and cdclk state is protected by connection_mutex dpll.lock serializes + * intel_{prepare,enable,disable}_shared_dpll. Must be global rather than per + * dpll, because on some platforms plls share registers. + */ +struct intel_dpll { + struct mutex lock; + + int num_shared_dpll; + struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; + const struct intel_dpll_mgr *mgr; + + struct { + int nssc; + int ssc; + } ref_clks; +}; + +struct intel_frontbuffer_tracking { + spinlock_t lock; + + /* + * Tracking bits for delayed frontbuffer flushing du to gpu activity or + * scheduled flips. 
+ unsigned busy_bits; + unsigned flip_bits; +}; + +struct intel_hotplug { + struct delayed_work hotplug_work; + + const u32 *hpd, *pch_hpd; + + struct { + unsigned long last_jiffies; + int count; + enum { + HPD_ENABLED = 0, + HPD_DISABLED = 1, + HPD_MARK_DISABLED = 2 + } state; + } stats[HPD_NUM_PINS]; + u32 event_bits; + u32 retry_bits; + struct delayed_work reenable_work; + + u32 long_port_mask; + u32 short_port_mask; + struct work_struct dig_port_work; + + struct work_struct poll_init_work; + bool poll_enabled; + + unsigned int hpd_storm_threshold; + /* Whether or not to count short HPD IRQs in HPD storms */ + u8 hpd_short_storm_enabled; + + /* + * If we get an HPD irq from DP and an HPD irq from non-DP, the + * non-DP HPD could block the workqueue on acquiring a mode config + * mutex that userspace may have taken. However, userspace is + * waiting on the DP workqueue to run, which is blocked behind the + * non-DP one. + */ + struct workqueue_struct *dp_wq; +}; + +struct intel_vbt_data { + /* bdb version */ + u16 version; + + /* Feature bits */ + unsigned int int_tv_support:1; + unsigned int int_crt_support:1; + unsigned int lvds_use_ssc:1; + unsigned int int_lvds_support:1; + unsigned int display_clock_mode:1; + unsigned int fdi_rx_polarity_inverted:1; + int lvds_ssc_freq; + enum drm_panel_orientation orientation; + + bool override_afc_startup; + u8 override_afc_startup_val; + + int crt_ddc_pin; + + struct list_head display_devices; + struct list_head bdb_blocks; + + struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */ + struct sdvo_device_mapping { + u8 initialized; + u8 dvo_port; + u8 slave_addr; + u8 dvo_wiring; + u8 i2c_pin; + u8 ddc_pin; + } sdvo_mappings[2]; +}; + +struct intel_wm { + /* + * Raw watermark latency values: + * in 0.1us units for WM0, + * in 0.5us units for WM1+. + */ + /* primary */ + u16 pri_latency[5]; + /* sprite */ + u16 spr_latency[5]; + /* cursor */ + u16 cur_latency[5]; + /* + * Raw watermark memory latency values + * for SKL for all 8 levels + * in 1us units. + */ + u16 skl_latency[8]; + + /* current hardware state */ + union { + struct ilk_wm_values hw; + struct vlv_wm_values vlv; + struct g4x_wm_values g4x; + }; + + u8 max_level; + + /* + * Should be held around atomic WM register writing; also + * protects intel_crtc->wm.active and + * crtc_state->wm.need_postvbl_update. + */ + struct mutex wm_mutex; + + bool ipc_enabled; +}; + +struct intel_display { + /* Display functions */ + struct { + /* Top level crtc-ish functions */ + const struct intel_display_funcs *display; + + /* Display CDCLK functions */ + const struct intel_cdclk_funcs *cdclk; + + /* Display pll funcs */ + const struct intel_dpll_funcs *dpll; + + /* irq display functions */ + const struct intel_hotplug_funcs *hotplug; + + /* pm display functions */ + const struct intel_wm_funcs *wm; + + /* fdi display functions */ + const struct intel_fdi_funcs *fdi; + + /* Display internal color functions */ + const struct intel_color_funcs *color; + + /* Display internal audio functions */ + const struct intel_audio_funcs *audio; + } funcs; + + /* Grouping using anonymous structs. Keep sorted. */
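The "Grouping using anonymous structs" comment above is the organizing idea of struct intel_display: each subsystem gets a dotted namespace (display.cdclk.*, display.wq.*, and so on) without a separate named type per group. A tiny standalone demo of the idiom (the fields and the 652800 kHz value are just examples):

#include <stdio.h>

struct workqueue;	/* incomplete type is fine behind a pointer */

struct display {
	struct {
		unsigned int max_cdclk_freq;
	} cdclk;	/* anonymous struct type, named member */

	struct {
		struct workqueue *modeset;
		struct workqueue *flip;
	} wq;
};

int main(void)
{
	struct display display = { .cdclk.max_cdclk_freq = 652800 };

	printf("max cdclk: %u kHz\n", display.cdclk.max_cdclk_freq);
	return 0;
}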
+ struct intel_atomic_helper { + struct llist_head free_list; + struct work_struct free_work; + } atomic_helper; + + struct { + /* backlight registers and fields in struct intel_panel */ + struct mutex lock; + } backlight; + + struct { + struct intel_global_obj obj; + + struct intel_bw_info { + /* for each QGV point */ + unsigned int deratedbw[I915_NUM_QGV_POINTS]; + /* for each PSF GV point */ + unsigned int psf_bw[I915_NUM_PSF_GV_POINTS]; + u8 num_qgv_points; + u8 num_psf_gv_points; + u8 num_planes; + } max[6]; + } bw; + + struct { + /* The current hardware cdclk configuration */ + struct intel_cdclk_config hw; + + /* cdclk, divider, and ratio table from bspec */ + const struct intel_cdclk_vals *table; + + struct intel_global_obj obj; + + unsigned int max_cdclk_freq; + } cdclk; + + struct { + /* The current hardware dbuf configuration */ + u8 enabled_slices; + + struct intel_global_obj obj; + } dbuf; + + struct { + /* VLV/CHV/BXT/GLK DSI MMIO register base address */ + u32 mmio_base; + } dsi; + + struct { + /* the fbdev registered on this device */ + struct intel_fbdev *fbdev; + struct work_struct suspend_work; + } fbdev; + + struct { + unsigned int pll_freq; + u32 rx_config; + } fdi; + + struct { + /* + * Base address of where the gmbus and gpio blocks are located + * (either on PCH or on SoC for platforms without PCH). + */ + u32 mmio_base; + + /* + * gmbus.mutex protects against concurrent usage of the single + * hw gmbus controller on different i2c buses. + */ + struct mutex mutex; + + struct intel_gmbus *bus[GMBUS_NUM_PINS]; + + wait_queue_head_t wait_queue; + } gmbus; + + struct { + struct i915_hdcp_comp_master *master; + bool comp_added; + + /* Mutex to protect the above hdcp component related values. */ + struct mutex comp_mutex; + } hdcp; + + struct { + struct i915_power_domains domains; + + /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */ + u32 chv_phy_control; + + /* perform PHY state sanity checks? */ + bool chv_phy_assert[2]; + } power; + + struct { + u32 mmio_base; + + /* protects panel power sequencer state */ + struct mutex mutex; + } pps; + + struct { + struct drm_property *broadcast_rgb; + struct drm_property *force_audio; + } properties; + + struct { + unsigned long mask; + } quirks; + + struct { + enum { + I915_SAGV_UNKNOWN = 0, + I915_SAGV_DISABLED, + I915_SAGV_ENABLED, + I915_SAGV_NOT_CONTROLLED + } status; + + u32 block_time_us; + } sagv; + + struct { + /* ordered wq for modesets */ + struct workqueue_struct *modeset; + + /* unbound hipri wq for page flips/plane updates */ + struct workqueue_struct *flip; + } wq; + + /* Grouping using named structs. Keep sorted. */
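The quirks member above shrinks to a single unsigned long mask, and callers now go through intel_has_quirk() (see the intel_ddi.c hunk earlier) instead of open-coding bit tests. A sketch of that helper pattern (the quirk numbering here is illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

enum quirk_id {
	QUIRK_BACKLIGHT_PRESENT,
	QUIRK_INCREASE_DDI_DISABLED_TIME,
};

struct display { unsigned long quirks_mask; };

static void set_quirk(struct display *d, enum quirk_id q)
{
	d->quirks_mask |= 1ul << q;
}

static bool has_quirk(const struct display *d, enum quirk_id q)
{
	return d->quirks_mask & (1ul << q);
}

int main(void)
{
	struct display d = { 0 };

	set_quirk(&d, QUIRK_INCREASE_DDI_DISABLED_TIME);
	printf("ddi quirk: %d\n",
	       has_quirk(&d, QUIRK_INCREASE_DDI_DISABLED_TIME));
	return 0;
}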
*/ + struct intel_audio audio; + struct intel_dmc dmc; + struct intel_dpll dpll; + struct intel_fbc *fbc[I915_MAX_FBCS]; + struct intel_frontbuffer_tracking fb_tracking; + struct intel_hotplug hotplug; + struct intel_opregion opregion; + struct intel_overlay *overlay; + struct intel_vbt_data vbt; + struct intel_wm wm; +}; + +#endif /* __INTEL_DISPLAY_CORE_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 6c3954479047..7c7253a2541c 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -26,6 +26,7 @@ #include "intel_pm.h" #include "intel_psr.h" #include "intel_sprite.h" +#include "skl_watermark.h" static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) { @@ -37,10 +38,10 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) struct drm_i915_private *dev_priv = node_to_i915(m->private); seq_printf(m, "FB tracking busy bits: 0x%08x\n", - dev_priv->fb_tracking.busy_bits); + dev_priv->display.fb_tracking.busy_bits); seq_printf(m, "FB tracking flip bits: 0x%08x\n", - dev_priv->fb_tracking.flip_bits); + dev_priv->display.fb_tracking.flip_bits); return 0; } @@ -103,7 +104,8 @@ static int i915_sr_status(struct seq_file *m, void *unused) static int i915_opregion(struct seq_file *m, void *unused) { - struct intel_opregion *opregion = &node_to_i915(m->private)->opregion; + struct drm_i915_private *i915 = node_to_i915(m->private); + struct intel_opregion *opregion = &i915->display.opregion; if (opregion->header) seq_write(m, opregion->header, OPREGION_SIZE); @@ -113,7 +115,8 @@ static int i915_opregion(struct seq_file *m, void *unused) static int i915_vbt(struct seq_file *m, void *unused) { - struct intel_opregion *opregion = &node_to_i915(m->private)->opregion; + struct drm_i915_private *i915 = node_to_i915(m->private); + struct intel_opregion *opregion = &i915->display.opregion; if (opregion->vbt) seq_write(m, opregion->vbt, opregion->vbt_size); @@ -129,7 +132,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) struct drm_framebuffer *drm_fb; #ifdef CONFIG_DRM_FBDEV_EMULATION - fbdev_fb = intel_fbdev_framebuffer(dev_priv->fbdev); + fbdev_fb = intel_fbdev_framebuffer(dev_priv->display.fbdev.fbdev); if (fbdev_fb) { seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", fbdev_fb->base.width, @@ -722,10 +725,11 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc) /* Not all platformas have a scaler */ if (num_scalers) { - seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d", + seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d scaling_filter=%d", num_scalers, crtc_state->scaler_state.scaler_users, - crtc_state->scaler_state.scaler_id); + crtc_state->scaler_state.scaler_id, + crtc_state->hw.scaling_filter); for (i = 0; i < num_scalers; i++) { const struct intel_scaler *sc = @@ -932,11 +936,11 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused) drm_modeset_lock_all(dev); seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n", - dev_priv->dpll.ref_clks.nssc, - dev_priv->dpll.ref_clks.ssc); + dev_priv->display.dpll.ref_clks.nssc, + dev_priv->display.dpll.ref_clks.ssc); - for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { - struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i]; + for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) { + struct intel_shared_dpll *pll 
= &dev_priv->display.dpll.shared_dplls[i]; seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name, pll->info->id); @@ -979,58 +983,6 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused) return 0; } -static int i915_ipc_status_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - - seq_printf(m, "Isochronous Priority Control: %s\n", - str_yes_no(dev_priv->ipc_enabled)); - return 0; -} - -static int i915_ipc_status_open(struct inode *inode, struct file *file) -{ - struct drm_i915_private *dev_priv = inode->i_private; - - if (!HAS_IPC(dev_priv)) - return -ENODEV; - - return single_open(file, i915_ipc_status_show, dev_priv); -} - -static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf, - size_t len, loff_t *offp) -{ - struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - intel_wakeref_t wakeref; - bool enable; - int ret; - - ret = kstrtobool_from_user(ubuf, len, &enable); - if (ret < 0) - return ret; - - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { - if (!dev_priv->ipc_enabled && enable) - drm_info(&dev_priv->drm, - "Enabling IPC: WM will be proper only after next commit\n"); - dev_priv->ipc_enabled = enable; - intel_enable_ipc(dev_priv); - } - - return len; -} - -static const struct file_operations i915_ipc_status_fops = { - .owner = THIS_MODULE, - .open = i915_ipc_status_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = i915_ipc_status_write -}; - static int i915_ddb_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -1427,9 +1379,9 @@ static int pri_wm_latency_show(struct seq_file *m, void *data) const u16 *latencies; if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->wm.skl_latency; + latencies = dev_priv->display.wm.skl_latency; else - latencies = dev_priv->wm.pri_latency; + latencies = dev_priv->display.wm.pri_latency; wm_latency_show(m, latencies); @@ -1442,9 +1394,9 @@ static int spr_wm_latency_show(struct seq_file *m, void *data) const u16 *latencies; if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->wm.skl_latency; + latencies = dev_priv->display.wm.skl_latency; else - latencies = dev_priv->wm.spr_latency; + latencies = dev_priv->display.wm.spr_latency; wm_latency_show(m, latencies); @@ -1457,9 +1409,9 @@ static int cur_wm_latency_show(struct seq_file *m, void *data) const u16 *latencies; if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->wm.skl_latency; + latencies = dev_priv->display.wm.skl_latency; else - latencies = dev_priv->wm.cur_latency; + latencies = dev_priv->display.wm.cur_latency; wm_latency_show(m, latencies); @@ -1550,9 +1502,9 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, u16 *latencies; if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->wm.skl_latency; + latencies = dev_priv->display.wm.skl_latency; else - latencies = dev_priv->wm.pri_latency; + latencies = dev_priv->display.wm.pri_latency; return wm_latency_write(file, ubuf, len, offp, latencies); } @@ -1565,9 +1517,9 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, u16 *latencies; if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->wm.skl_latency; + latencies = dev_priv->display.wm.skl_latency; else - latencies = dev_priv->wm.spr_latency; + latencies = dev_priv->display.wm.spr_latency; return wm_latency_write(file, ubuf, len, offp, latencies); } @@ -1580,9 +1532,9 @@ static ssize_t 
cur_wm_latency_write(struct file *file, const char __user *ubuf, u16 *latencies; if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->wm.skl_latency; + latencies = dev_priv->display.wm.skl_latency; else - latencies = dev_priv->wm.cur_latency; + latencies = dev_priv->display.wm.cur_latency; return wm_latency_write(file, ubuf, len, offp, latencies); } @@ -1617,14 +1569,14 @@ static const struct file_operations i915_cur_wm_latency_fops = { static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = m->private; - struct i915_hotplug *hotplug = &dev_priv->hotplug; + struct intel_hotplug *hotplug = &dev_priv->display.hotplug; /* Synchronize with everything first in case there's been an HPD * storm, but we haven't finished handling it in the kernel yet */ intel_synchronize_irq(dev_priv); - flush_work(&dev_priv->hotplug.dig_port_work); - flush_delayed_work(&dev_priv->hotplug.hotplug_work); + flush_work(&dev_priv->display.hotplug.dig_port_work); + flush_delayed_work(&dev_priv->display.hotplug.hotplug_work); seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold); seq_printf(m, "Detected: %s\n", @@ -1639,7 +1591,7 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file, { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; - struct i915_hotplug *hotplug = &dev_priv->hotplug; + struct intel_hotplug *hotplug = &dev_priv->display.hotplug; unsigned int new_threshold; int i; char *newline; @@ -1678,7 +1630,7 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file, spin_unlock_irq(&dev_priv->irq_lock); /* Re-enable hpd immediately if we were in an irq storm */ - flush_delayed_work(&dev_priv->hotplug.reenable_work); + flush_delayed_work(&dev_priv->display.hotplug.reenable_work); return len; } @@ -1702,7 +1654,7 @@ static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data) struct drm_i915_private *dev_priv = m->private; seq_printf(m, "Enabled: %s\n", - str_yes_no(dev_priv->hotplug.hpd_short_storm_enabled)); + str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled)); return 0; } @@ -1720,7 +1672,7 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file, { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; - struct i915_hotplug *hotplug = &dev_priv->hotplug; + struct intel_hotplug *hotplug = &dev_priv->display.hotplug; char *newline; char tmp[16]; int i; @@ -1756,7 +1708,7 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file, spin_unlock_irq(&dev_priv->irq_lock); /* Re-enable hpd immediately if we were in an irq storm */ - flush_delayed_work(&dev_priv->hotplug.reenable_work); + flush_delayed_work(&dev_priv->display.hotplug.reenable_work); return len; } @@ -1907,7 +1859,6 @@ static const struct { {"i915_dp_test_active", &i915_displayport_test_active_fops}, {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}, {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops}, - {"i915_ipc_status", &i915_ipc_status_fops}, {"i915_drrs_ctl", &i915_drrs_ctl_fops}, {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}, }; @@ -1931,6 +1882,7 @@ void intel_display_debugfs_register(struct drm_i915_private *i915) intel_dmc_debugfs_register(i915); intel_fbc_debugfs_register(i915); + skl_watermark_ipc_debugfs_register(i915); } static int i915_panel_show(struct seq_file *m, void *data) @@ -2137,7 +2089,7 @@ static const struct file_operations i915_dsc_fec_support_fops = { .write = i915_dsc_fec_support_write }; -static int i915_dsc_bpp_show(struct seq_file 
*m, void *data) +static int i915_dsc_bpc_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; struct drm_device *dev = connector->dev; @@ -2160,14 +2112,14 @@ static int i915_dsc_bpp_show(struct seq_file *m, void *data) } crtc_state = to_intel_crtc_state(crtc->state); - seq_printf(m, "Compressed_BPP: %d\n", crtc_state->dsc.compressed_bpp); + seq_printf(m, "Input_BPC: %d\n", crtc_state->dsc.config.bits_per_component); out: drm_modeset_unlock(&dev->mode_config.connection_mutex); return ret; } -static ssize_t i915_dsc_bpp_write(struct file *file, +static ssize_t i915_dsc_bpc_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { @@ -2175,33 +2127,32 @@ static ssize_t i915_dsc_bpp_write(struct file *file, ((struct seq_file *)file->private_data)->private; struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - int dsc_bpp = 0; + int dsc_bpc = 0; int ret; - ret = kstrtoint_from_user(ubuf, len, 0, &dsc_bpp); + ret = kstrtoint_from_user(ubuf, len, 0, &dsc_bpc); if (ret < 0) return ret; - intel_dp->force_dsc_bpp = dsc_bpp; + intel_dp->force_dsc_bpc = dsc_bpc; *offp += len; return len; } -static int i915_dsc_bpp_open(struct inode *inode, +static int i915_dsc_bpc_open(struct inode *inode, struct file *file) { - return single_open(file, i915_dsc_bpp_show, - inode->i_private); + return single_open(file, i915_dsc_bpc_show, inode->i_private); } -static const struct file_operations i915_dsc_bpp_fops = { +static const struct file_operations i915_dsc_bpc_fops = { .owner = THIS_MODULE, - .open = i915_dsc_bpp_open, + .open = i915_dsc_bpc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, - .write = i915_dsc_bpp_write + .write = i915_dsc_bpc_write }; /* @@ -2271,8 +2222,8 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector) debugfs_create_file("i915_dsc_fec_support", 0644, root, connector, &i915_dsc_fec_support_fops); - debugfs_create_file("i915_dsc_bpp", 0644, root, - connector, &i915_dsc_bpp_fops); + debugfs_create_file("i915_dsc_bpc", 0644, root, + connector, &i915_dsc_bpc_fops); } if (connector->connector_type == DRM_MODE_CONNECTOR_DSI || diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 589af257edeb..1e608b9e5055 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -7,6 +7,7 @@ #include "i915_drv.h" #include "i915_irq.h" +#include "intel_backlight_regs.h" #include "intel_cdclk.h" #include "intel_combo_phy.h" #include "intel_de.h" @@ -18,8 +19,8 @@ #include "intel_mchbar_regs.h" #include "intel_pch_refclk.h" #include "intel_pcode.h" -#include "intel_pm.h" #include "intel_snps_phy.h" +#include "skl_watermark.h" #include "vlv_sideband.h" #define for_each_power_domain_well(__dev_priv, __power_well, __domain) \ @@ -243,7 +244,7 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, struct i915_power_domains *power_domains; bool ret; - power_domains = &dev_priv->power_domains; + power_domains = &dev_priv->display.power.domains; mutex_lock(&power_domains->lock); ret = __intel_display_power_is_enabled(dev_priv, domain); @@ -268,7 +269,7 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv, if (target_dc_state != states[i]) continue; - if (dev_priv->dmc.allowed_dc_mask & target_dc_state) + if (dev_priv->display.dmc.allowed_dc_mask & target_dc_state) break; 
target_dc_state = states[i + 1]; @@ -291,7 +292,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv, { struct i915_power_well *power_well; bool dc_off_enabled; - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; mutex_lock(&power_domains->lock); power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF); @@ -301,7 +302,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv, state = sanitize_target_dc_state(dev_priv, state); - if (state == dev_priv->dmc.target_dc_state) + if (state == dev_priv->display.dmc.target_dc_state) goto unlock; dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well); @@ -312,7 +313,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv, if (!dc_off_enabled) intel_power_well_enable(dev_priv, power_well); - dev_priv->dmc.target_dc_state = state; + dev_priv->display.dmc.target_dc_state = state; if (!dc_off_enabled) intel_power_well_disable(dev_priv, power_well); @@ -339,7 +340,7 @@ assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, - power_domains); + display.power.domains); return !drm_WARN_ON(&i915->drm, bitmap_intersects(power_domains->async_put_domains[0].bits, @@ -352,7 +353,7 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains) { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, - power_domains); + display.power.domains); struct intel_power_domain_mask async_put_mask; enum intel_display_power_domain domain; bool err = false; @@ -375,7 +376,7 @@ static void print_power_domains(struct i915_power_domains *power_domains, { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, - power_domains); + display.power.domains); enum intel_display_power_domain domain; drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM)); @@ -390,7 +391,7 @@ print_async_put_domains_state(struct i915_power_domains *power_domains) { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, - power_domains); + display.power.domains); drm_dbg(&i915->drm, "async_put_wakeref %u\n", power_domains->async_put_wakeref); @@ -445,7 +446,7 @@ static bool intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct intel_power_domain_mask async_put_mask; bool ret = false; @@ -474,7 +475,7 @@ static void __intel_display_power_get_domain(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *power_well; if (intel_display_power_grab_async_put_ref(dev_priv, domain)) @@ -501,7 +502,7 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv, intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; intel_wakeref_t wakeref = 
intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&power_domains->lock); @@ -527,7 +528,7 @@ intel_wakeref_t intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; intel_wakeref_t wakeref; bool is_enabled; @@ -563,7 +564,7 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv, const char *name = intel_display_power_domain_str(domain); struct intel_power_domain_mask async_put_mask; - power_domains = &dev_priv->power_domains; + power_domains = &dev_priv->display.power.domains; drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain], "Use count on domain %s is already zero\n", @@ -583,7 +584,7 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv, static void __intel_display_power_put(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; mutex_lock(&power_domains->lock); __intel_display_power_put_domain(dev_priv, domain); @@ -596,7 +597,7 @@ queue_async_put_domains_work(struct i915_power_domains *power_domains, { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, - power_domains); + display.power.domains); drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); power_domains->async_put_wakeref = wakeref; drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq, @@ -610,7 +611,7 @@ release_async_put_domains(struct i915_power_domains *power_domains, { struct drm_i915_private *dev_priv = container_of(power_domains, struct drm_i915_private, - power_domains); + display.power.domains); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; enum intel_display_power_domain domain; intel_wakeref_t wakeref; @@ -637,8 +638,8 @@ intel_display_power_put_async_work(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, - power_domains.async_put_work.work); - struct i915_power_domains *power_domains = &dev_priv->power_domains; + display.power.domains.async_put_work.work); + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm); intel_wakeref_t old_work_wakeref = 0; @@ -698,7 +699,7 @@ void __intel_display_power_put_async(struct drm_i915_private *i915, enum intel_display_power_domain domain, intel_wakeref_t wakeref) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; struct intel_runtime_pm *rpm = &i915->runtime_pm; intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm); @@ -746,7 +747,7 @@ out_verify: */ void intel_display_power_flush_work(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; struct intel_power_domain_mask async_put_mask; intel_wakeref_t work_wakeref; @@ -779,7 +780,7 @@ out_verify: static void intel_display_power_flush_work_sync(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; 
intel_display_power_flush_work(i915); cancel_delayed_work_sync(&power_domains->async_put_work); @@ -908,7 +909,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, return 0; if (IS_DG2(dev_priv)) - max_dc = 0; + max_dc = 1; else if (IS_DG1(dev_priv)) max_dc = 3; else if (DISPLAY_VER(dev_priv) >= 12) @@ -976,15 +977,15 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, */ int intel_power_domains_init(struct drm_i915_private *dev_priv) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; dev_priv->params.disable_power_well = sanitize_disable_power_well_option(dev_priv, dev_priv->params.disable_power_well); - dev_priv->dmc.allowed_dc_mask = + dev_priv->display.dmc.allowed_dc_mask = get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc); - dev_priv->dmc.target_dc_state = + dev_priv->display.dmc.target_dc_state = sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); mutex_init(&power_domains->lock); @@ -1003,12 +1004,12 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) */ void intel_power_domains_cleanup(struct drm_i915_private *dev_priv) { - intel_display_power_map_cleanup(&dev_priv->power_domains); + intel_display_power_map_cleanup(&dev_priv->display.power.domains); } static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *power_well; mutex_lock(&power_domains->lock); @@ -1037,7 +1038,7 @@ static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv, void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv, u8 req_slices) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; u8 slice_mask = INTEL_INFO(dev_priv)->display.dbuf.slice_mask; enum dbuf_slice slice; @@ -1060,14 +1061,14 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv, for_each_dbuf_slice(dev_priv, slice) gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice)); - dev_priv->dbuf.enabled_slices = req_slices; + dev_priv->display.dbuf.enabled_slices = req_slices; mutex_unlock(&power_domains->lock); } static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) { - dev_priv->dbuf.enabled_slices = + dev_priv->display.dbuf.enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv); /* @@ -1075,7 +1076,7 @@ static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) * figure out later which slices we have and what we need. 
*/ gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) | - dev_priv->dbuf.enabled_slices); + dev_priv->display.dbuf.enabled_slices); } static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) @@ -1101,7 +1102,7 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv) unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask; u32 mask, val, i; - if (IS_ALDERLAKE_P(dev_priv)) + if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) return; mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK | @@ -1309,7 +1310,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); intel_update_cdclk(dev_priv); - intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK"); + intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK"); } /* @@ -1381,6 +1382,9 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, reset_bits = RESET_PCH_HANDSHAKE_ENABLE; } + if (DISPLAY_VER(dev_priv) >= 14) + reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN; + val = intel_de_read(dev_priv, reg); if (enable) @@ -1394,7 +1398,7 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, static void skl_display_core_init(struct drm_i915_private *dev_priv, bool resume) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *well; gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); @@ -1426,13 +1430,14 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv, static void skl_display_core_uninit(struct drm_i915_private *dev_priv) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *well; if (!HAS_DISPLAY(dev_priv)) return; gen9_disable_dc_states(dev_priv); + /* TODO: disable DMC program */ gen9_dbuf_disable(dev_priv); @@ -1459,7 +1464,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv) static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *well; gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); @@ -1493,13 +1498,14 @@ static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume static void bxt_display_core_uninit(struct drm_i915_private *dev_priv) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *well; if (!HAS_DISPLAY(dev_priv)) return; gen9_disable_dc_states(dev_priv); + /* TODO: disable DMC program */ gen9_dbuf_disable(dev_priv); @@ -1601,7 +1607,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) static void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *well; u32 val; @@ -1668,13 +1674,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, static void icl_display_core_uninit(struct drm_i915_private *dev_priv) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = 
&dev_priv->display.power.domains; struct i915_power_well *well; if (!HAS_DISPLAY(dev_priv)) return; gen9_disable_dc_states(dev_priv); + intel_dmc_disable_program(dev_priv); /* 1. Disable all display engine functions -> already done */ @@ -1712,7 +1719,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv) * power well state and lane status to reconstruct the * expected initial value. */ - dev_priv->chv_phy_control = + dev_priv->display.power.chv_phy_control = PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) | PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) | PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) | @@ -1734,27 +1741,27 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv) if (mask == 0xf) mask = 0x0; else - dev_priv->chv_phy_control |= + dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0); - dev_priv->chv_phy_control |= + dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0); mask = (status & DPLL_PORTC_READY_MASK) >> 4; if (mask == 0xf) mask = 0x0; else - dev_priv->chv_phy_control |= + dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1); - dev_priv->chv_phy_control |= + dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1); - dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); + dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); - dev_priv->chv_phy_assert[DPIO_PHY0] = false; + dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false; } else { - dev_priv->chv_phy_assert[DPIO_PHY0] = true; + dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true; } if (intel_power_well_is_enabled(dev_priv, cmn_d)) { @@ -1766,21 +1773,21 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv) if (mask == 0xf) mask = 0x0; else - dev_priv->chv_phy_control |= + dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0); - dev_priv->chv_phy_control |= + dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0); - dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); + dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); - dev_priv->chv_phy_assert[DPIO_PHY1] = false; + dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false; } else { - dev_priv->chv_phy_assert[DPIO_PHY1] = true; + dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true; } drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n", - dev_priv->chv_phy_control); + dev_priv->display.power.chv_phy_control); /* Defer application of initial phy_control to enabling the powerwell */ } @@ -1864,7 +1871,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); */ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; power_domains->initializing = true; @@ -1905,8 +1912,8 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) /* Disable power support if the user asked for it. 
*/ if (!i915->params.disable_power_well) { drm_WARN_ON(&i915->drm, power_domains->disable_wakeref); - i915->power_domains.disable_wakeref = intel_display_power_get(i915, - POWER_DOMAIN_INIT); + i915->display.power.domains.disable_wakeref = intel_display_power_get(i915, + POWER_DOMAIN_INIT); } intel_power_domains_sync_hw(i915); @@ -1927,12 +1934,12 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) void intel_power_domains_driver_remove(struct drm_i915_private *i915) { intel_wakeref_t wakeref __maybe_unused = - fetch_and_zero(&i915->power_domains.init_wakeref); + fetch_and_zero(&i915->display.power.domains.init_wakeref); /* Remove the refcount we took to keep power well support disabled. */ if (!i915->params.disable_power_well) intel_display_power_put(i915, POWER_DOMAIN_INIT, - fetch_and_zero(&i915->power_domains.disable_wakeref)); + fetch_and_zero(&i915->display.power.domains.disable_wakeref)); intel_display_power_flush_work_sync(i915); @@ -1954,7 +1961,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915) */ void intel_power_domains_sanitize_state(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; struct i915_power_well *power_well; mutex_lock(&power_domains->lock); @@ -1988,7 +1995,7 @@ void intel_power_domains_sanitize_state(struct drm_i915_private *i915) void intel_power_domains_enable(struct drm_i915_private *i915) { intel_wakeref_t wakeref __maybe_unused = - fetch_and_zero(&i915->power_domains.init_wakeref); + fetch_and_zero(&i915->display.power.domains.init_wakeref); intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); intel_power_domains_verify_state(i915); @@ -2003,7 +2010,7 @@ void intel_power_domains_enable(struct drm_i915_private *i915) */ void intel_power_domains_disable(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; drm_WARN_ON(&i915->drm, power_domains->init_wakeref); power_domains->init_wakeref = @@ -2026,7 +2033,7 @@ void intel_power_domains_disable(struct drm_i915_private *i915) void intel_power_domains_suspend(struct drm_i915_private *i915, enum i915_drm_suspend_mode suspend_mode) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; intel_wakeref_t wakeref __maybe_unused = fetch_and_zero(&power_domains->init_wakeref); @@ -2039,7 +2046,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, * resources as required and also enable deeper system power states * that would be blocked if the firmware was inactive. 
*/ - if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) && + if (!(i915->display.dmc.allowed_dc_mask & DC_STATE_EN_DC9) && suspend_mode == I915_DRM_SUSPEND_IDLE && intel_dmc_has_payload(i915)) { intel_display_power_flush_work(i915); @@ -2053,7 +2060,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, */ if (!i915->params.disable_power_well) intel_display_power_put(i915, POWER_DOMAIN_INIT, - fetch_and_zero(&i915->power_domains.disable_wakeref)); + fetch_and_zero(&i915->display.power.domains.disable_wakeref)); intel_display_power_flush_work(i915); intel_power_domains_verify_state(i915); @@ -2080,7 +2087,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, */ void intel_power_domains_resume(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; if (power_domains->display_core_suspended) { intel_power_domains_init_hw(i915, true); @@ -2098,7 +2105,7 @@ void intel_power_domains_resume(struct drm_i915_private *i915) static void intel_power_domains_dump_info(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; struct i915_power_well *power_well; for_each_power_well(i915, power_well) { @@ -2126,7 +2133,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915) */ static void intel_power_domains_verify_state(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; struct i915_power_well *power_well; bool dump_domain_info; @@ -2232,10 +2239,10 @@ void intel_display_power_resume(struct drm_i915_private *i915) bxt_disable_dc9(i915); icl_display_core_init(i915, true); if (intel_dmc_has_payload(i915)) { - if (i915->dmc.allowed_dc_mask & + if (i915->display.dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC6) skl_enable_dc6(i915); - else if (i915->dmc.allowed_dc_mask & + else if (i915->display.dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5) gen9_enable_dc5(i915); } @@ -2243,7 +2250,7 @@ void intel_display_power_resume(struct drm_i915_private *i915) bxt_disable_dc9(i915); bxt_display_core_init(i915, true); if (intel_dmc_has_payload(i915) && - (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) + (i915->display.dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) gen9_enable_dc5(i915); } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_disable_pc8(i915); @@ -2252,7 +2259,7 @@ void intel_display_power_resume(struct drm_i915_private *i915) void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; int i; mutex_lock(&power_domains->lock); diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index 97b367f39f35..dc04afc6cc8f 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -1350,6 +1350,117 @@ static const struct i915_power_well_desc_list xelpd_power_wells[] = { I915_PW_DESCRIPTORS(xelpd_power_wells_main), }; +/* + * MTL is based on XELPD power domains with the exception of power gating for: + * - DDI_IO (moved to PLL logic) + * - AUX and AUX_IO functionality and register access for USBC1-4 (PICA always-on) 
+ */ +#define XELPDP_PW_2_POWER_DOMAINS \ + XELPD_PW_B_POWER_DOMAINS, \ + XELPD_PW_C_POWER_DOMAINS, \ + XELPD_PW_D_POWER_DOMAINS, \ + POWER_DOMAIN_AUDIO_PLAYBACK, \ + POWER_DOMAIN_VGA, \ + POWER_DOMAIN_PORT_DDI_LANES_TC1, \ + POWER_DOMAIN_PORT_DDI_LANES_TC2, \ + POWER_DOMAIN_PORT_DDI_LANES_TC3, \ + POWER_DOMAIN_PORT_DDI_LANES_TC4 + +I915_DECL_PW_DOMAINS(xelpdp_pwdoms_pw_2, + XELPDP_PW_2_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(xelpdp_pwdoms_dc_off, + XELPDP_PW_2_POWER_DOMAINS, + POWER_DOMAIN_AUDIO_MMIO, + POWER_DOMAIN_MODESET, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc1, + POWER_DOMAIN_AUX_USBC1, + POWER_DOMAIN_AUX_TBT1); + +I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc2, + POWER_DOMAIN_AUX_USBC2, + POWER_DOMAIN_AUX_TBT2); + +I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc3, + POWER_DOMAIN_AUX_USBC3, + POWER_DOMAIN_AUX_TBT3); + +I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc4, + POWER_DOMAIN_AUX_USBC4, + POWER_DOMAIN_AUX_TBT4); + +static const struct i915_power_well_desc xelpdp_power_wells_main[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("DC_off", &xelpdp_pwdoms_dc_off, + .id = SKL_DISP_DC_OFF), + ), + .ops = &gen9_dc_off_power_well_ops, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_2", &xelpdp_pwdoms_pw_2, + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .id = SKL_DISP_PW_2), + ), + .ops = &hsw_power_well_ops, + .has_vga = true, + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_A", &xelpd_pwdoms_pw_a, + .hsw.idx = XELPD_PW_CTL_IDX_PW_A), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_A), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_B", &xelpd_pwdoms_pw_b, + .hsw.idx = XELPD_PW_CTL_IDX_PW_B), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_B), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_C", &xelpd_pwdoms_pw_c, + .hsw.idx = XELPD_PW_CTL_IDX_PW_C), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_C), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_D", &xelpd_pwdoms_pw_d, + .hsw.idx = XELPD_PW_CTL_IDX_PW_D), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_D), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A), + I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B), + I915_PW("AUX_TC1", &xelpdp_pwdoms_aux_tc1, .xelpdp.aux_ch = AUX_CH_USBC1), + I915_PW("AUX_TC2", &xelpdp_pwdoms_aux_tc2, .xelpdp.aux_ch = AUX_CH_USBC2), + I915_PW("AUX_TC3", &xelpdp_pwdoms_aux_tc3, .xelpdp.aux_ch = AUX_CH_USBC3), + I915_PW("AUX_TC4", &xelpdp_pwdoms_aux_tc4, .xelpdp.aux_ch = AUX_CH_USBC4), + ), + .ops = &xelpdp_aux_power_well_ops, + }, +}; + +static const struct i915_power_well_desc_list xelpdp_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(icl_power_wells_pw_1), + I915_PW_DESCRIPTORS(xelpdp_power_wells_main), +}; + static void init_power_well_domains(const struct i915_power_well_instance *inst, struct i915_power_well *power_well) { @@ -1388,7 +1499,7 @@ __set_power_wells(struct i915_power_domains *power_domains, { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, - power_domains); + display.power.domains); u64 power_well_ids = 0; const struct i915_power_well_desc_list *desc_list; const struct i915_power_well_desc *desc; @@ -1447,7 +1558,7 @@ int intel_display_power_map_init(struct 
i915_power_domains *power_domains) { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, - power_domains); + display.power.domains); /* * The enabling order will be from lower to higher indexed wells, * the disabling order is reversed. @@ -1457,7 +1568,9 @@ int intel_display_power_map_init(struct i915_power_domains *power_domains) return 0; } - if (DISPLAY_VER(i915) >= 13) + if (DISPLAY_VER(i915) >= 14) + return set_power_wells(power_domains, xelpdp_power_wells); + else if (DISPLAY_VER(i915) >= 13) return set_power_wells(power_domains, xelpd_power_wells); else if (IS_DG1(i915)) return set_power_wells(power_domains, dg1_power_wells); diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index 91cfd5890f46..df7ee4969ef1 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -5,6 +5,7 @@ #include "i915_drv.h" #include "i915_irq.h" +#include "intel_backlight_regs.h" #include "intel_combo_phy.h" #include "intel_combo_phy_regs.h" #include "intel_crt.h" @@ -16,10 +17,10 @@ #include "intel_dpll.h" #include "intel_hotplug.h" #include "intel_pcode.h" -#include "intel_pm.h" #include "intel_pps.h" #include "intel_tc.h" #include "intel_vga.h" +#include "skl_watermark.h" #include "vlv_sideband.h" #include "vlv_sideband_reg.h" @@ -84,7 +85,7 @@ lookup_power_well(struct drm_i915_private *i915, drm_WARN(&i915->drm, 1, "Power well %d not defined for this platform\n", power_well_id); - return &i915->power_domains.power_wells[0]; + return &i915->display.power.domains.power_wells[0]; } void intel_power_well_enable(struct drm_i915_private *i915, @@ -710,8 +711,8 @@ void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) drm_dbg_kms(&dev_priv->drm, "Resetting DC state tracking from %02x to %02x\n", - dev_priv->dmc.dc_state, val); - dev_priv->dmc.dc_state = val; + dev_priv->display.dmc.dc_state, val); + dev_priv->display.dmc.dc_state = val; } /** @@ -746,8 +747,8 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) return; if (drm_WARN_ON_ONCE(&dev_priv->drm, - state & ~dev_priv->dmc.allowed_dc_mask)) - state &= dev_priv->dmc.allowed_dc_mask; + state & ~dev_priv->display.dmc.allowed_dc_mask)) + state &= dev_priv->display.dmc.allowed_dc_mask; val = intel_de_read(dev_priv, DC_STATE_EN); mask = gen9_dc_mask(dev_priv); @@ -755,16 +756,16 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) val & mask, state); /* Check if DMC is ignoring our DC state requests */ - if ((val & mask) != dev_priv->dmc.dc_state) + if ((val & mask) != dev_priv->display.dmc.dc_state) drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n", - dev_priv->dmc.dc_state, val & mask); + dev_priv->display.dmc.dc_state, val & mask); val &= ~mask; val |= state; gen9_write_dc_state(dev_priv, val); - dev_priv->dmc.dc_state = val & mask; + dev_priv->display.dmc.dc_state = val & mask; } static void tgl_enable_dc3co(struct drm_i915_private *dev_priv) @@ -945,7 +946,7 @@ static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) { u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv); - u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices; + u8 enabled_dbuf_slices = dev_priv->display.dbuf.enabled_slices; drm_WARN(&dev_priv->drm, hw_enabled_dbuf_slices != enabled_dbuf_slices, @@ -958,7 +959,7 @@ void 
gen9_disable_dc_states(struct drm_i915_private *dev_priv) { struct intel_cdclk_config cdclk_config = {}; - if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) { + if (dev_priv->display.dmc.target_dc_state == DC_STATE_EN_DC3CO) { tgl_disable_dc3co(dev_priv); return; } @@ -971,7 +972,7 @@ void gen9_disable_dc_states(struct drm_i915_private *dev_priv) intel_cdclk_get_cdclk(dev_priv, &cdclk_config); /* Can't read out voltage_level so can't use intel_cdclk_changed() */ drm_WARN_ON(&dev_priv->drm, - intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, + intel_cdclk_needs_modeset(&dev_priv->display.cdclk.hw, &cdclk_config)); gen9_assert_dbuf_enabled(dev_priv); @@ -1000,7 +1001,7 @@ static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, if (!intel_dmc_has_payload(dev_priv)) return; - switch (dev_priv->dmc.target_dc_state) { + switch (dev_priv->display.dmc.target_dc_state) { case DC_STATE_EN_DC3CO: tgl_enable_dc3co(dev_priv); break; @@ -1156,10 +1157,10 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) * (and never recovering) in this case. intel_dsi_post_disable() will * clear it when we turn off the display. */ - val = intel_de_read(dev_priv, DSPCLK_GATE_D); + val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv)); val &= DPOUNIT_CLOCK_GATE_DISABLE; val |= VRHUNIT_CLOCK_GATE_DISABLE; - intel_de_write(dev_priv, DSPCLK_GATE_D, val); + intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val); /* * Disable trickle feed and enable pnd deadline calculation @@ -1207,7 +1208,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) * During driver initialization/resume we can avoid restoring the * part of the HW/SW state that will be inited anyway explicitly. */ - if (dev_priv->power_domains.initializing) + if (dev_priv->display.power.domains.initializing) return; intel_hpd_init(dev_priv); @@ -1302,7 +1303,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); struct i915_power_well *cmn_d = lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); - u32 phy_control = dev_priv->chv_phy_control; + u32 phy_control = dev_priv->display.power.chv_phy_control; u32 phy_status = 0; u32 phy_status_mask = 0xffffffff; @@ -1313,7 +1314,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) * reset (ie. the power well has been disabled at * least once). 
*/ - if (!dev_priv->chv_phy_assert[DPIO_PHY0]) + if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY0]) phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) | @@ -1321,7 +1322,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1)); - if (!dev_priv->chv_phy_assert[DPIO_PHY1]) + if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY1]) phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)); @@ -1397,7 +1398,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) drm_err(&dev_priv->drm, "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask, - phy_status, dev_priv->chv_phy_control); + phy_status, dev_priv->display.power.chv_phy_control); } #undef BITS_SET @@ -1457,13 +1458,13 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, vlv_dpio_put(dev_priv); - dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); + dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->chv_phy_control); + dev_priv->display.power.chv_phy_control); drm_dbg_kms(&dev_priv->drm, "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", - phy, dev_priv->chv_phy_control); + phy, dev_priv->display.power.chv_phy_control); assert_chv_phy_status(dev_priv); } @@ -1487,18 +1488,18 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, assert_pll_disabled(dev_priv, PIPE_C); } - dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); + dev_priv->display.power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->chv_phy_control); + dev_priv->display.power.chv_phy_control); vlv_set_power_well(dev_priv, power_well, false); drm_dbg_kms(&dev_priv->drm, "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", - phy, dev_priv->chv_phy_control); + phy, dev_priv->display.power.chv_phy_control); /* PHY is fully reset now, so we can enable the PHY state asserts */ - dev_priv->chv_phy_assert[phy] = true; + dev_priv->display.power.chv_phy_assert[phy] = true; assert_chv_phy_status(dev_priv); } @@ -1516,7 +1517,7 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi * reset (ie. the power well has been disabled at * least once). 
*/ - if (!dev_priv->chv_phy_assert[phy]) + if (!dev_priv->display.power.chv_phy_assert[phy]) return; if (ch == DPIO_CH0) @@ -1570,27 +1571,27 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, enum dpio_channel ch, bool override) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; bool was_override; mutex_lock(&power_domains->lock); - was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + was_override = dev_priv->display.power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); if (override == was_override) goto out; if (override) - dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); else - dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->chv_phy_control); + dev_priv->display.power.chv_phy_control); drm_dbg_kms(&dev_priv->drm, "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", - phy, ch, dev_priv->chv_phy_control); + phy, ch, dev_priv->display.power.chv_phy_control); assert_chv_phy_status(dev_priv); @@ -1604,26 +1605,26 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder, bool override, unsigned int mask) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder)); enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); mutex_lock(&power_domains->lock); - dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); - dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); + dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); + dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); if (override) - dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); else - dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->chv_phy_control); + dev_priv->display.power.chv_phy_control); drm_dbg_kms(&dev_priv->drm, "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", - phy, ch, mask, dev_priv->chv_phy_control); + phy, ch, mask, dev_priv->display.power.chv_phy_control); assert_chv_phy_status(dev_priv); @@ -1701,7 +1702,7 @@ static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->chv_phy_control); + dev_priv->display.power.chv_phy_control); } static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, @@ -1797,6 +1798,43 @@ tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv, return intel_power_well_refcount(power_well); } +static void xelpdp_aux_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum aux_ch aux_ch = 
i915_power_well_instance(power_well)->xelpdp.aux_ch; + + intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch), + XELPDP_DP_AUX_CH_CTL_POWER_REQUEST, + XELPDP_DP_AUX_CH_CTL_POWER_REQUEST); + + /* + * The power status flag cannot be used to determine whether aux + * power wells have finished powering up. Instead we're + * expected to just wait a fixed 600us after raising the request + * bit. + */ + usleep_range(600, 1200); +} + +static void xelpdp_aux_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch; + + intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch), + XELPDP_DP_AUX_CH_CTL_POWER_REQUEST, + 0); + usleep_range(10, 30); +} + +static bool xelpdp_aux_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch; + + return intel_de_read(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch)) & + XELPDP_DP_AUX_CH_CTL_POWER_STATUS; +} const struct i915_power_well_ops i9xx_always_on_power_well_ops = { .sync_hw = i9xx_power_well_sync_hw_noop, @@ -1910,3 +1948,10 @@ const struct i915_power_well_ops tgl_tc_cold_off_ops = { .disable = tgl_tc_cold_off_power_well_disable, .is_enabled = tgl_tc_cold_off_power_well_is_enabled, }; + +const struct i915_power_well_ops xelpdp_aux_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = xelpdp_aux_power_well_enable, + .disable = xelpdp_aux_power_well_disable, + .is_enabled = xelpdp_aux_power_well_enabled, +}; diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h index d0624642dcb6..e13b521e322a 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.h +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h @@ -14,15 +14,15 @@ struct drm_i915_private; struct i915_power_well; #define for_each_power_well(__dev_priv, __power_well) \ - for ((__power_well) = (__dev_priv)->power_domains.power_wells; \ - (__power_well) - (__dev_priv)->power_domains.power_wells < \ - (__dev_priv)->power_domains.power_well_count; \ + for ((__power_well) = (__dev_priv)->display.power.domains.power_wells; \ + (__power_well) - (__dev_priv)->display.power.domains.power_wells < \ + (__dev_priv)->display.power.domains.power_well_count; \ (__power_well)++) #define for_each_power_well_reverse(__dev_priv, __power_well) \ - for ((__power_well) = (__dev_priv)->power_domains.power_wells + \ - (__dev_priv)->power_domains.power_well_count - 1; \ - (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \ + for ((__power_well) = (__dev_priv)->display.power.domains.power_wells + \ + (__dev_priv)->display.power.domains.power_well_count - 1; \ + (__power_well) - (__dev_priv)->display.power.domains.power_wells >= 0; \ (__power_well)--) /* @@ -80,6 +80,9 @@ struct i915_power_well_instance { */ u8 idx; } hsw; + struct { + u8 aux_ch; + } xelpdp; }; }; @@ -169,5 +172,6 @@ extern const struct i915_power_well_ops vlv_dpio_power_well_ops; extern const struct i915_power_well_ops icl_aux_power_well_ops; extern const struct i915_power_well_ops icl_ddi_power_well_ops; extern const struct i915_power_well_ops tgl_tc_cold_off_ops; +extern const struct i915_power_well_ops xelpdp_aux_power_well_ops; #endif diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 01977cd237eb..298d00a11f47 100644 --- 
a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -1130,6 +1130,7 @@ struct intel_crtc_state { /* m2_n2 for eDP downclock */ struct intel_link_m_n dp_m2_n2; bool has_drrs; + bool seamless_m_n; /* PSR is supported but might not be enabled due the lack of enabled planes */ bool has_psr; @@ -1712,7 +1713,7 @@ struct intel_dp { /* Display stream compression testing */ bool force_dsc_en; - int force_dsc_bpp; + int force_dsc_bpc; bool hobl_failed; bool hobl_active; diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index fa9ef591b885..e52ecc0738a6 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -52,8 +52,8 @@ #define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE -#define DG2_DMC_PATH DMC_PATH(dg2, 2, 06) -#define DG2_DMC_VERSION_REQUIRED DMC_VERSION(2, 06) +#define DG2_DMC_PATH DMC_PATH(dg2, 2, 07) +#define DG2_DMC_VERSION_REQUIRED DMC_VERSION(2, 07) MODULE_FIRMWARE(DG2_DMC_PATH); #define ADLP_DMC_PATH DMC_PATH(adlp, 2, 16) @@ -250,7 +250,7 @@ struct stepping_info { static bool has_dmc_id_fw(struct drm_i915_private *i915, int dmc_id) { - return i915->dmc.dmc_info[dmc_id].payload; + return i915->display.dmc.dmc_info[dmc_id].payload; } bool intel_dmc_has_payload(struct drm_i915_private *i915) @@ -277,6 +277,17 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv) intel_de_posting_read(dev_priv, DC_STATE_DEBUG); } +static void disable_event_handler(struct drm_i915_private *i915, + i915_reg_t ctl_reg, i915_reg_t htp_reg) +{ + intel_de_write(i915, ctl_reg, + REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, + DMC_EVT_CTL_TYPE_EDGE_0_1) | + REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK, + DMC_EVT_CTL_EVENT_ID_FALSE)); + intel_de_write(i915, htp_reg, 0); +} + static void disable_flip_queue_event(struct drm_i915_private *i915, i915_reg_t ctl_reg, i915_reg_t htp_reg) @@ -299,12 +310,7 @@ disable_flip_queue_event(struct drm_i915_private *i915, return; } - intel_de_write(i915, ctl_reg, - REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, - DMC_EVT_CTL_TYPE_EDGE_0_1) | - REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK, - DMC_EVT_CTL_EVENT_ID_FALSE)); - intel_de_write(i915, htp_reg, 0); + disable_event_handler(i915, ctl_reg, htp_reg); } static bool @@ -356,6 +362,51 @@ disable_all_flip_queue_events(struct drm_i915_private *i915) } } +static void disable_all_event_handlers(struct drm_i915_private *i915) +{ + int id; + + /* TODO: disable the event handlers on pre-GEN12 platforms as well */ + if (DISPLAY_VER(i915) < 12) + return; + + for (id = DMC_FW_MAIN; id < DMC_FW_MAX; id++) { + int handler; + + if (!has_dmc_id_fw(i915, id)) + continue; + + for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++) + disable_event_handler(i915, + DMC_EVT_CTL(i915, id, handler), + DMC_EVT_HTP(i915, id, handler)); + } +} + +static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable) +{ + enum pipe pipe; + + if (DISPLAY_VER(i915) != 13) + return; + + /* + * Wa_16015201720:adl-p,dg2 + * The WA requires clock gating to be disabled all the time + * for pipe A and B. + * For pipe C and D clock gating needs to be disabled only + * during initializing the firmware. 
+ */ + if (enable) + for (pipe = PIPE_A; pipe <= PIPE_D; pipe++) + intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe), + 0, PIPEDMC_GATING_DIS); + else + for (pipe = PIPE_C; pipe <= PIPE_D; pipe++) + intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe), + PIPEDMC_GATING_DIS, 0); +} + /** * intel_dmc_load_program() - write the firmware from memory to register. * @dev_priv: i915 drm device. @@ -366,12 +417,16 @@ disable_all_flip_queue_events(struct drm_i915_private *i915) */ void intel_dmc_load_program(struct drm_i915_private *dev_priv) { - struct intel_dmc *dmc = &dev_priv->dmc; + struct intel_dmc *dmc = &dev_priv->display.dmc; u32 id, i; if (!intel_dmc_has_payload(dev_priv)) return; + pipedmc_clock_gating_wa(dev_priv, true); + + disable_all_event_handlers(dev_priv); + assert_rpm_wakelock_held(&dev_priv->runtime_pm); preempt_disable(); @@ -393,7 +448,7 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv) } } - dev_priv->dmc.dc_state = 0; + dev_priv->display.dmc.dc_state = 0; gen9_set_dc_state_debugmask(dev_priv); @@ -403,12 +458,31 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv) * here. */ disable_all_flip_queue_events(dev_priv); + + pipedmc_clock_gating_wa(dev_priv, false); +} + +/** + * intel_dmc_disable_program() - disable the firmware + * @i915: i915 drm device + * + * Disable all event handlers in the firmware, making sure the firmware is + * inactive after the display is uninitialized. + */ +void intel_dmc_disable_program(struct drm_i915_private *i915) +{ + if (!intel_dmc_has_payload(i915)) + return; + + pipedmc_clock_gating_wa(i915, true); + disable_all_event_handlers(i915); + pipedmc_clock_gating_wa(i915, false); } void assert_dmc_loaded(struct drm_i915_private *i915) { drm_WARN_ONCE(&i915->drm, - !intel_de_read(i915, DMC_PROGRAM(i915->dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)), + !intel_de_read(i915, DMC_PROGRAM(i915->display.dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)), "DMC program storage start is NULL\n"); drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE), "DMC SSP Base Not fine\n"); @@ -445,7 +519,7 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc, { unsigned int i, id; - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc); + struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); for (i = 0; i < num_entries; i++) { id = package_ver <= 1 ? 
DMC_FW_MAIN : fw_info[i].dmc_id; @@ -473,7 +547,7 @@ static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc, const u32 *mmioaddr, u32 mmio_count, int header_ver, u8 dmc_id) { - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc); + struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); u32 start_range, end_range; int i; @@ -511,7 +585,7 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc, const struct intel_dmc_header_base *dmc_header, size_t rem_size, u8 dmc_id) { - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc); + struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id]; unsigned int header_len_bytes, dmc_header_size, payload_size, i; const u32 *mmioaddr, *mmiodata; @@ -622,7 +696,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc, const struct stepping_info *si, size_t rem_size) { - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc); + struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); u32 package_size = sizeof(struct intel_package_header); u32 num_entries, max_entries; const struct intel_fw_info *fw_info; @@ -676,7 +750,7 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc, struct intel_css_header *css_header, size_t rem_size) { - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc); + struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); if (rem_size < sizeof(struct intel_css_header)) { drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n"); @@ -713,7 +787,7 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv, struct intel_css_header *css_header; struct intel_package_header *package_header; struct intel_dmc_header_base *dmc_header; - struct intel_dmc *dmc = &dev_priv->dmc; + struct intel_dmc *dmc = &dev_priv->display.dmc; struct stepping_info display_info = { '*', '*'}; const struct stepping_info *si = intel_get_stepping_info(dev_priv, &display_info); u32 readcount = 0; @@ -740,7 +814,7 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv, readcount += r; for (id = 0; id < DMC_FW_MAX; id++) { - if (!dev_priv->dmc.dmc_info[id].present) + if (!dev_priv->display.dmc.dmc_info[id].present) continue; offset = readcount + dmc->dmc_info[id].dmc_offset * 4; @@ -756,15 +830,15 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv, static void intel_dmc_runtime_pm_get(struct drm_i915_private *dev_priv) { - drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref); - dev_priv->dmc.wakeref = + drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref); + dev_priv->display.dmc.wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); } static void intel_dmc_runtime_pm_put(struct drm_i915_private *dev_priv) { intel_wakeref_t wakeref __maybe_unused = - fetch_and_zero(&dev_priv->dmc.wakeref); + fetch_and_zero(&dev_priv->display.dmc.wakeref); intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); } @@ -775,10 +849,10 @@ static void dmc_load_work_fn(struct work_struct *work) struct intel_dmc *dmc; const struct firmware *fw = NULL; - dev_priv = container_of(work, typeof(*dev_priv), dmc.work); - dmc = &dev_priv->dmc; + dev_priv = container_of(work, typeof(*dev_priv), display.dmc.work); + dmc = &dev_priv->display.dmc; - request_firmware(&fw, dev_priv->dmc.fw_path, dev_priv->drm.dev); + request_firmware(&fw, dev_priv->display.dmc.fw_path, dev_priv->drm.dev); parse_dmc_fw(dev_priv, fw); if (intel_dmc_has_payload(dev_priv)) { @@ -787,7 
+861,7 @@ static void dmc_load_work_fn(struct work_struct *work) drm_info(&dev_priv->drm, "Finished loading DMC firmware %s (v%u.%u)\n", - dev_priv->dmc.fw_path, DMC_VERSION_MAJOR(dmc->version), + dev_priv->display.dmc.fw_path, DMC_VERSION_MAJOR(dmc->version), DMC_VERSION_MINOR(dmc->version)); } else { drm_notice(&dev_priv->drm, @@ -810,9 +884,9 @@ static void dmc_load_work_fn(struct work_struct *work) */ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv) { - struct intel_dmc *dmc = &dev_priv->dmc; + struct intel_dmc *dmc = &dev_priv->display.dmc; - INIT_WORK(&dev_priv->dmc.work, dmc_load_work_fn); + INIT_WORK(&dev_priv->display.dmc.work, dmc_load_work_fn); if (!HAS_DMC(dev_priv)) return; @@ -895,7 +969,7 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv) } drm_dbg_kms(&dev_priv->drm, "Loading %s\n", dmc->fw_path); - schedule_work(&dev_priv->dmc.work); + schedule_work(&dev_priv->display.dmc.work); } /** @@ -911,7 +985,7 @@ void intel_dmc_ucode_suspend(struct drm_i915_private *dev_priv) if (!HAS_DMC(dev_priv)) return; - flush_work(&dev_priv->dmc.work); + flush_work(&dev_priv->display.dmc.work); /* Drop the reference held in case DMC isn't loaded. */ if (!intel_dmc_has_payload(dev_priv)) @@ -953,16 +1027,16 @@ void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv) return; intel_dmc_ucode_suspend(dev_priv); - drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref); + drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref); for (id = 0; id < DMC_FW_MAX; id++) - kfree(dev_priv->dmc.dmc_info[id].payload); + kfree(dev_priv->display.dmc.dmc_info[id].payload); } void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m, struct drm_i915_private *i915) { - struct intel_dmc *dmc = &i915->dmc; + struct intel_dmc *dmc = &i915->display.dmc; if (!HAS_DMC(i915)) return; @@ -984,7 +1058,7 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) if (!HAS_DMC(i915)) return -ENODEV; - dmc = &i915->dmc; + dmc = &i915->display.dmc; wakeref = intel_runtime_pm_get(&i915->runtime_pm); diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h index 41091aee3b47..67e03315ef99 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.h +++ b/drivers/gpu/drm/i915/display/intel_dmc.h @@ -47,6 +47,7 @@ struct intel_dmc { void intel_dmc_ucode_init(struct drm_i915_private *i915); void intel_dmc_load_program(struct drm_i915_private *i915); +void intel_dmc_disable_program(struct drm_i915_private *i915); void intel_dmc_ucode_fini(struct drm_i915_private *i915); void intel_dmc_ucode_suspend(struct drm_i915_private *i915); void intel_dmc_ucode_resume(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h index 238620b55966..5e5e41644ddf 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h +++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h @@ -28,6 +28,8 @@ #define _DMC_REG(i915, dmc_id, reg) \ ((reg) - __DMC_REG_MMIO_BASE + _DMC_REG_MMIO_BASE(i915, dmc_id)) +#define DMC_EVENT_HANDLER_COUNT_GEN12 8 + #define _DMC_EVT_HTP_0 0x8f004 #define DMC_EVT_HTP(i915, dmc_id, handler) \ diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index a4e113253df3..c9be61d2348e 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -286,11 +286,22 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp) return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1); 
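/*
 * Aside, not part of the patch: the repeated conversions above from
 * container_of(dmc, typeof(*i915), dmc) to
 * container_of(dmc, typeof(*i915), display.dmc) work because
 * container_of() only needs the compile-time offset of the member
 * path, and that path may be nested. A minimal stand-alone sketch
 * (struct names here are hypothetical, not the driver's):
 */
#include <stddef.h>

struct dmc_state { int dc_state; };
struct display_state { struct dmc_state dmc; };
struct i915_state { struct display_state display; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct i915_state *dmc_to_i915(struct dmc_state *dmc)
{
	/* member path "display.dmc" mirrors the patch's new layout */
	return container_of(dmc, struct i915_state, display.dmc);
}

int main(void)
{
	struct i915_state gpu;

	/* recover the enclosing struct from a pointer to the nested member */
	return dmc_to_i915(&gpu.display.dmc) == &gpu ? 0 : 1;
}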
} +static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port) +{ + int vbt_max_lanes = intel_bios_dp_max_lane_count(&dig_port->base); + int max_lanes = dig_port->max_lanes; + + if (vbt_max_lanes) + max_lanes = min(max_lanes, vbt_max_lanes); + + return max_lanes; +} + /* Theoretical max between source and sink */ static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - int source_max = dig_port->max_lanes; + int source_max = intel_dp_max_source_lane_count(dig_port); int sink_max = intel_dp->max_sink_lane_count; int fia_max = intel_tc_port_fia_max_lane_count(dig_port); int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps); @@ -389,23 +400,13 @@ static int dg2_max_source_rate(struct intel_dp *intel_dp) return intel_dp_is_edp(intel_dp) ? 810000 : 1350000; } -static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy) -{ - u32 voltage; - - voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK; - - return voltage == VOLTAGE_INFO_0_85V; -} - static int icl_max_source_rate(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); - if (intel_phy_is_combo(dev_priv, phy) && - (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp))) + if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp)) return 540000; return 810000; @@ -413,23 +414,7 @@ static int icl_max_source_rate(struct intel_dp *intel_dp) static int ehl_max_source_rate(struct intel_dp *intel_dp) { - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); - - if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy)) - return 540000; - - return 810000; -} - -static int dg1_max_source_rate(struct intel_dp *intel_dp) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum phy phy = intel_port_to_phy(i915, dig_port->base.port); - - if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy)) + if (intel_dp_is_edp(intel_dp)) return 540000; return 810000; @@ -491,7 +476,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp) max_rate = dg2_max_source_rate(intel_dp); else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) - max_rate = dg1_max_source_rate(intel_dp); + max_rate = 810000; else if (IS_JSL_EHL(dev_priv)) max_rate = ehl_max_source_rate(intel_dp); else @@ -720,7 +705,7 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915, if (bigjoiner) { u32 max_bpp_bigjoiner = - i915->max_cdclk_freq * 48 / + i915->display.cdclk.max_cdclk_freq * 48 / intel_dp_mode_to_fec_clock(mode_clock); bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner); @@ -1312,21 +1297,45 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, } } +static bool has_seamless_m_n(struct intel_connector *connector) +{ + struct drm_i915_private *i915 = to_i915(connector->base.dev); + + /* + * Seamless M/N reprogramming only implemented + * for BDW+ double buffered M/N registers so far. 
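/*
 * Aside, not part of the patch: intel_dp_max_source_lane_count() above
 * is the driver's usual "min over independent limits" shape, and
 * intel_dp_max_common_lane_count() folds in the sink, FIA and LTTPR
 * limits the same way. A hedged stand-alone sketch (the zero-means-
 * unconstrained convention is taken from the VBT handling above):
 */
static int max_common_lane_count(int hw_max, int vbt_max, int sink_max,
				 int fia_max, int lttpr_max)
{
	int lanes = hw_max;

	if (vbt_max && vbt_max < lanes)
		lanes = vbt_max;
	if (fia_max && fia_max < lanes)
		lanes = fia_max;
	if (lttpr_max && lttpr_max < lanes)
		lanes = lttpr_max;

	return sink_max < lanes ? sink_max : lanes;
}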
+ */ + return HAS_DOUBLE_BUFFERED_M_N(i915) && + intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS; +} + +static int intel_dp_mode_clock(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + + /* FIXME a bit of a mess wrt clock vs. crtc_clock */ + if (has_seamless_m_n(connector)) + return intel_panel_highest_mode(connector, adjusted_mode)->clock; + else + return adjusted_mode->crtc_clock; +} + /* Optimize link config in order: max bpp, min clock, min lanes */ static int intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state, const struct link_config_limits *limits) { - struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; - int bpp, i, lane_count; + int bpp, i, lane_count, clock = intel_dp_mode_clock(pipe_config, conn_state); int mode_rate, link_rate, link_avail; for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp); - mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, - output_bpp); + mode_rate = intel_dp_link_required(clock, output_bpp); for (i = 0; i < intel_dp->num_common_rates; i++) { link_rate = intel_dp_common_rate(intel_dp, i); @@ -1377,7 +1386,18 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 max_req_bpc) return 0; } -#define DSC_SUPPORTED_VERSION_MIN 1 +static int intel_dp_source_dsc_version_minor(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + return DISPLAY_VER(i915) >= 14 ? 2 : 1; +} + +static int intel_dp_sink_dsc_version_minor(struct intel_dp *intel_dp) +{ + return (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & DP_DSC_MINOR_MASK) >> + DP_DSC_MINOR_SHIFT; +} static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) @@ -1395,6 +1415,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, * DP_DSC_RC_BUF_SIZE for this. */ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; + vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay; /* * Slice Height of 8 works for all currently available panels. 
So start @@ -1416,9 +1437,8 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; vdsc_cfg->dsc_version_minor = - min(DSC_SUPPORTED_VERSION_MIN, - (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & - DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT); + min(intel_dp_source_dsc_version_minor(intel_dp), + intel_dp_sink_dsc_version_minor(intel_dp)); vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & DP_DSC_RGB; @@ -1464,6 +1484,11 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, conn_state->max_requested_bpc); + if (intel_dp->force_dsc_bpc) { + pipe_bpp = intel_dp->force_dsc_bpc * 3; + drm_dbg_kms(&dev_priv->drm, "Input DSC BPP forced to %d", pipe_bpp); + } + /* Min Input BPC for ICL+ is 8 */ if (pipe_bpp < 8 * 3) { drm_dbg_kms(&dev_priv->drm, @@ -1515,28 +1540,12 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, pipe_config->dsc.slice_count = dsc_dp_slice_count; } - /* As of today we support DSC for only RGB */ - if (intel_dp->force_dsc_bpp) { - if (intel_dp->force_dsc_bpp >= 8 && - intel_dp->force_dsc_bpp < pipe_bpp) { - drm_dbg_kms(&dev_priv->drm, - "DSC BPP forced to %d", - intel_dp->force_dsc_bpp); - pipe_config->dsc.compressed_bpp = - intel_dp->force_dsc_bpp; - } else { - drm_dbg_kms(&dev_priv->drm, - "Invalid DSC BPP %d", - intel_dp->force_dsc_bpp); - } - } - /* * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate * is greater than the maximum Cdclock and if slice count is even * then we need to use 2 VDSC instances. */ - if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq || + if (adjusted_mode->crtc_clock > dev_priv->display.cdclk.max_cdclk_freq || pipe_config->bigjoiner_pipes) { if (pipe_config->dsc.slice_count < 2) { drm_dbg_kms(&dev_priv->drm, @@ -1626,7 +1635,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, * Optimize for slow and wide for everything, because there are some * eDP 1.3 and 1.4 panels that don't work well with fast and narrow.
*/ - ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); + ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, conn_state, &limits); if (ret || joiner_needs_dsc || intel_dp->force_dsc_en) { drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n", @@ -1869,8 +1878,7 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, static bool cpu_transcoder_has_drrs(struct drm_i915_private *i915, enum transcoder cpu_transcoder) { - /* M1/N1 is double buffered */ - if (DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915)) + if (HAS_DOUBLE_BUFFERED_M_N(i915)) return true; return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder); @@ -1908,13 +1916,16 @@ static bool can_enable_drrs(struct intel_connector *connector, static void intel_dp_drrs_compute_config(struct intel_connector *connector, struct intel_crtc_state *pipe_config, - int output_bpp, bool constant_n) + int output_bpp) { struct drm_i915_private *i915 = to_i915(connector->base.dev); const struct drm_display_mode *downclock_mode = intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode); int pixel_clock; + if (has_seamless_m_n(connector)) + pipe_config->seamless_m_n = true; + if (!can_enable_drrs(connector, pipe_config, downclock_mode)) { if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder)) intel_zero_m_n(&pipe_config->dp_m2_n2); @@ -1932,7 +1943,7 @@ intel_dp_drrs_compute_config(struct intel_connector *connector, intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock, pipe_config->port_clock, &pipe_config->dp_m2_n2, - constant_n, pipe_config->fec_enable); + pipe_config->fec_enable); /* FIXME: abstract this better */ if (pipe_config->splitter.enable) @@ -2007,7 +2018,6 @@ intel_dp_compute_config(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(encoder); const struct drm_display_mode *fixed_mode; struct intel_connector *connector = intel_dp->attached_connector; - bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N); int ret = 0, output_bpp; if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A) @@ -2086,7 +2096,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, adjusted_mode->crtc_clock, pipe_config->port_clock, &pipe_config->dp_m_n, - constant_n, pipe_config->fec_enable); + pipe_config->fec_enable); /* FIXME: abstract this better */ if (pipe_config->splitter.enable) @@ -2097,8 +2107,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, intel_vrr_compute_config(pipe_config, conn_state); intel_psr_compute_config(intel_dp, pipe_config, conn_state); - intel_dp_drrs_compute_config(connector, pipe_config, - output_bpp, constant_n); + intel_dp_drrs_compute_config(connector, pipe_config, output_bpp); intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); @@ -5032,9 +5041,9 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector) struct drm_i915_private *i915 = to_i915(connector->dev); spin_lock_irq(&i915->irq_lock); - i915->hotplug.event_bits |= BIT(encoder->hpd_pin); + i915->display.hotplug.event_bits |= BIT(encoder->hpd_pin); spin_unlock_irq(&i915->irq_lock); - queue_delayed_work(system_wq, &i915->hotplug.hotplug_work, 0); + queue_delayed_work(system_wq, &i915->display.hotplug.hotplug_work, 0); } static const struct drm_connector_funcs intel_dp_connector_funcs = { @@ -5192,7 +5201,7 @@ intel_edp_add_properties(struct intel_dp *intel_dp) return; 
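/*
 * Aside, not part of the patch: with the constant_n quirk argument
 * removed, intel_link_compute_m_n() (called a few hunks above) is left
 * computing the two classic DP ratios, roughly data M/N =
 * (pixel_clock * bpp) / (link_clock * lanes * 8) and link M/N =
 * pixel_clock / link_clock. A hedged sketch of one way to reduce such
 * a ratio to hardware-sized fields (the driver's own rounding differs,
 * and the 24-bit field width is an assumption here):
 */
#include <stdint.h>

static uint64_t gcd64(uint64_t a, uint64_t b)
{
	while (b) {
		uint64_t t = a % b;

		a = b;
		b = t;
	}
	return a;
}

/* reduce m/n until both fit the assumed 24-bit register fields */
static void reduce_m_n(uint64_t m, uint64_t n, uint32_t *out_m, uint32_t *out_n)
{
	uint64_t g = gcd64(m, n);

	m /= g;
	n /= g;
	while (m > 0xffffff || n > 0xffffff) {
		m >>= 1;
		n >>= 1;
	}
	*out_m = (uint32_t)m;
	*out_n = (uint32_t)n;
}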
drm_connector_set_panel_orientation_with_quirk(&connector->base, - i915->vbt.orientation, + i915->display.vbt.orientation, fixed_mode->hdisplay, fixed_mode->vdisplay); } @@ -5302,8 +5311,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, intel_panel_init(intel_connector); - if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK)) - intel_connector->panel.backlight.power = intel_pps_backlight_power; intel_backlight_setup(intel_connector, pipe); intel_edp_add_properties(intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c index 2bc119374555..48c375c65a41 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -42,7 +42,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp) bool done; #define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) - done = wait_event_timeout(i915->gmbus_wait_queue, C, + done = wait_event_timeout(i915->display.gmbus.wait_queue, C, msecs_to_jiffies_timeout(timeout_ms)); /* just trace the final value */ @@ -86,7 +86,7 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) * divide by 2000 and use that */ if (dig_port->aux_ch == AUX_CH_A) - freq = dev_priv->cdclk.hw.cdclk; + freq = dev_priv->display.cdclk.hw.cdclk; else freq = RUNTIME_INFO(dev_priv)->rawclk_freq; return DIV_ROUND_CLOSEST(freq, 2000); @@ -150,6 +150,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, u32 unused) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); u32 ret; /* @@ -170,6 +171,13 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, if (intel_tc_port_in_tbt_alt_mode(dig_port)) ret |= DP_AUX_CH_CTL_TBT_IO; + /* + * Power request bit is already set during aux power well enable. + * Preserve the bit across aux transactions. 
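/*
 * Aside, not part of the patch: the XELPDP hunk above is a "compose the
 * whole register value but keep a bit owned by another code path" case;
 * the power request bit was set when the AUX power well was enabled, so
 * every later CTL write must carry it too. A hedged sketch with made-up
 * bit positions:
 */
#include <stdint.h>

#define AUX_CTL_SEND_BUSY	(1u << 31)	/* hypothetical layout */
#define AUX_CTL_POWER_REQUEST	(1u << 23)	/* hypothetical layout */

static uint32_t aux_ctl_compose(uint32_t timeout_bits, int display_ver)
{
	uint32_t val = AUX_CTL_SEND_BUSY | timeout_bits;

	/* re-assert the bit so this full write doesn't drop the request */
	if (display_ver >= 14)
		val |= AUX_CTL_POWER_REQUEST;

	return val;
}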
+ */ + if (DISPLAY_VER(i915) >= 14) + ret |= XELPDP_DP_AUX_CH_CTL_POWER_REQUEST; + return ret; } @@ -629,6 +637,46 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index) } } +static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum aux_ch aux_ch = dig_port->aux_ch; + + switch (aux_ch) { + case AUX_CH_A: + case AUX_CH_B: + case AUX_CH_USBC1: + case AUX_CH_USBC2: + case AUX_CH_USBC3: + case AUX_CH_USBC4: + return XELPDP_DP_AUX_CH_CTL(aux_ch); + default: + MISSING_CASE(aux_ch); + return XELPDP_DP_AUX_CH_CTL(AUX_CH_A); + } +} + +static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum aux_ch aux_ch = dig_port->aux_ch; + + switch (aux_ch) { + case AUX_CH_A: + case AUX_CH_B: + case AUX_CH_USBC1: + case AUX_CH_USBC2: + case AUX_CH_USBC3: + case AUX_CH_USBC4: + return XELPDP_DP_AUX_CH_DATA(aux_ch, index); + default: + MISSING_CASE(aux_ch); + return XELPDP_DP_AUX_CH_DATA(AUX_CH_A, index); + } +} + void intel_dp_aux_fini(struct intel_dp *intel_dp) { if (cpu_latency_qos_request_active(&intel_dp->pm_qos)) @@ -644,7 +692,10 @@ void intel_dp_aux_init(struct intel_dp *intel_dp) struct intel_encoder *encoder = &dig_port->base; enum aux_ch aux_ch = dig_port->aux_ch; - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(dev_priv) >= 14) { + intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg; + intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg; + } else if (DISPLAY_VER(dev_priv) >= 12) { intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg; intel_dp->aux_ch_data_reg = tgl_aux_data_reg; } else if (DISPLAY_VER(dev_priv) >= 9) { diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c index a7640dbcf00e..88689124c013 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c @@ -17,6 +17,7 @@ #include "intel_dp.h" #include "intel_dp_hdcp.h" #include "intel_hdcp.h" +#include "intel_hdcp_regs.h" static unsigned int transcoder_to_stream_enc_status(enum transcoder cpu_transcoder) { diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 9feaf1a589f3..3d3efcf02011 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -37,17 +37,6 @@ static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp) DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0; } -static const char *intel_dp_phy_name(enum drm_dp_phy dp_phy, - char *buf, size_t buf_size) -{ - if (dp_phy == DP_PHY_DPRX) - snprintf(buf, buf_size, "DPRX"); - else - snprintf(buf, buf_size, "LTTPR %d", dp_phy - DP_PHY_LTTPR1 + 1); - - return buf; -} - static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy) { @@ -60,20 +49,19 @@ static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp, { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy); - char phy_name[10]; - - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)); if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dpcd, dp_phy, phy_caps) < 0) { drm_dbg_kms(&dp_to_i915(intel_dp)->drm, "[ENCODER:%d:%s][%s] failed to read the PHY caps\n", - encoder->base.base.id, 
encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); return; } drm_dbg_kms(&dp_to_i915(intel_dp)->drm, "[ENCODER:%d:%s][%s] PHY capabilities: %*ph\n", - encoder->base.base.id, encoder->base.name, phy_name, + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy), (int)sizeof(intel_dp->lttpr_phy_caps[0]), phy_caps); } @@ -423,14 +411,13 @@ intel_dp_get_adjust_train(struct intel_dp *intel_dp, { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *i915 = to_i915(encoder->base.dev); - char phy_name[10]; int lane; if (intel_dp_is_uhbr(crtc_state)) { drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, " "TX FFE request: " TRAIN_REQ_FMT "\n", encoder->base.base.id, encoder->base.name, - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)), + drm_dp_phy_name(dp_phy), crtc_state->lane_count, TRAIN_REQ_TX_FFE_ARGS(link_status)); } else { @@ -438,7 +425,7 @@ intel_dp_get_adjust_train(struct intel_dp *intel_dp, "vswing request: " TRAIN_REQ_FMT ", " "pre-emphasis request: " TRAIN_REQ_FMT "\n", encoder->base.base.id, encoder->base.name, - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)), + drm_dp_phy_name(dp_phy), crtc_state->lane_count, TRAIN_REQ_VSWING_ARGS(link_status), TRAIN_REQ_PREEMPH_ARGS(link_status)); @@ -503,13 +490,12 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *i915 = to_i915(encoder->base.dev); u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat); - char phy_name[10]; if (train_pat != DP_TRAINING_PATTERN_DISABLE) drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] Using DP training pattern TPS%c\n", encoder->base.base.id, encoder->base.name, - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)), + drm_dp_phy_name(dp_phy), dp_training_pattern_name(train_pat)); intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat); @@ -546,13 +532,12 @@ void intel_dp_set_signal_levels(struct intel_dp *intel_dp, { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *i915 = to_i915(encoder->base.dev); - char phy_name[10]; if (intel_dp_is_uhbr(crtc_state)) { drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, " "TX FFE presets: " TRAIN_SET_FMT "\n", encoder->base.base.id, encoder->base.name, - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)), + drm_dp_phy_name(dp_phy), crtc_state->lane_count, TRAIN_SET_TX_FFE_ARGS(intel_dp->train_set)); } else { @@ -560,7 +545,7 @@ void intel_dp_set_signal_levels(struct intel_dp *intel_dp, "vswing levels: " TRAIN_SET_FMT ", " "pre-emphasis levels: " TRAIN_SET_FMT "\n", encoder->base.base.id, encoder->base.name, - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)), + drm_dp_phy_name(dp_phy), crtc_state->lane_count, TRAIN_SET_VSWING_ARGS(intel_dp->train_set), TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set)); @@ -671,6 +656,28 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp, intel_dp_compute_rate(intel_dp, crtc_state->port_clock, &link_bw, &rate_select); + /* + * WaEdpLinkRateDataReload + * + * Parade PS8461E MUX (used on various TGL+ laptops) needs + * to snoop the link rates reported by the sink when we + * use LINK_RATE_SET in order to operate in jitter cleaning + * mode (as opposed to redriver mode). Unfortunately it + * loses track of the snooped link rates when powered down, + * so we need to make it re-snoop often. Without this, high + * link rates are not stable.
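/*
 * Aside, not part of the patch: the table the workaround above re-reads
 * (DP_SUPPORTED_LINK_RATES, DPCD 0x010) holds up to 8 little-endian
 * 16-bit entries in units of 200 kHz, zero-terminated. A hedged
 * decoding sketch:
 */
#include <stddef.h>
#include <stdint.h>

/* returns how many valid rates were decoded into rates_khz[] */
static size_t decode_sink_rates(const uint8_t raw[16], int rates_khz[8])
{
	size_t n;

	for (n = 0; n < 8; n++) {
		uint16_t val = (uint16_t)(raw[n * 2] | (raw[n * 2 + 1] << 8));

		if (!val)
			break;
		rates_khz[n] = val * 200;	/* 0x6978 -> 5400000 kHz (HBR2) */
	}
	return n;
}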
+ */ + if (!link_bw) { + struct intel_connector *connector = intel_dp->attached_connector; + __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; + + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Reloading eDP link rates\n", + connector->base.base.id, connector->base.name); + + drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, + sink_rates, sizeof(sink_rates)); + } + if (link_bw) drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Using LINK_BW_SET value %02x\n", @@ -732,12 +739,11 @@ intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy, { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *i915 = to_i915(encoder->base.dev); - char phy_name[10]; drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n", encoder->base.base.id, encoder->base.name, - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)), + drm_dp_phy_name(dp_phy), link_status[0], link_status[1], link_status[2], link_status[3], link_status[4], link_status[5]); } @@ -757,21 +763,19 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, int voltage_tries, cr_tries, max_cr_tries; u8 link_status[DP_LINK_STATUS_SIZE]; bool max_vswing_reached = false; - char phy_name[10]; int delay_us; delay_us = drm_dp_read_clock_recovery_delay(&intel_dp->aux, intel_dp->dpcd, dp_phy, intel_dp_is_uhbr(crtc_state)); - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)); - /* clock recovery */ if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy, DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE)) { drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to enable link training\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); return false; } @@ -795,14 +799,16 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy, link_status) < 0) { drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to get link status\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); return false; } if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) { drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] Clock recovery OK\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); return true; } @@ -810,7 +816,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, intel_dp_dump_link_status(intel_dp, dp_phy, link_status); drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] Same voltage tried 5 times\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); return false; } @@ -818,7 +825,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, intel_dp_dump_link_status(intel_dp, dp_phy, link_status); drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] Max Voltage Swing reached\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); return false; } @@ -828,7 +836,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) { drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to update link training\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + 
drm_dp_phy_name(dp_phy)); return false; } @@ -846,7 +855,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, intel_dp_dump_link_status(intel_dp, dp_phy, link_status); drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed clock recovery %d times, giving up!\n", - encoder->base.base.id, encoder->base.name, phy_name, max_cr_tries); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy), max_cr_tries); return false; } @@ -924,15 +934,12 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, u32 training_pattern; u8 link_status[DP_LINK_STATUS_SIZE]; bool channel_eq = false; - char phy_name[10]; int delay_us; delay_us = drm_dp_read_channel_eq_delay(&intel_dp->aux, intel_dp->dpcd, dp_phy, intel_dp_is_uhbr(crtc_state)); - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)); - training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy); /* Scrambling is disabled for TPS2/3 and enabled for TPS4 */ if (training_pattern != DP_TRAINING_PATTERN_4) @@ -944,7 +951,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to start channel equalization\n", encoder->base.base.id, encoder->base.name, - phy_name); + drm_dp_phy_name(dp_phy)); return false; } @@ -955,7 +962,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, link_status) < 0) { drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to get link status\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); break; } @@ -966,7 +974,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] Clock recovery check failed, cannot " "continue channel equalization\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); break; } @@ -975,7 +984,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, channel_eq = true; drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] Channel EQ done. 
DP Training successful\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); break; } @@ -985,7 +995,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) { drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to update link training\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); break; } } @@ -995,7 +1006,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, intel_dp_dump_link_status(intel_dp, dp_phy, link_status); drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] Channel equalization failed 5 times\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); } return channel_eq; @@ -1070,7 +1082,6 @@ intel_dp_link_train_phy(struct intel_dp *intel_dp, { struct intel_connector *connector = intel_dp->attached_connector; struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - char phy_name[10]; bool ret = false; if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy)) @@ -1086,7 +1097,7 @@ out: "[CONNECTOR:%d:%s][ENCODER:%d:%s][%s] Link Training %s at link rate = %d, lane count = %d\n", connector->base.base.id, connector->base.name, encoder->base.base.id, encoder->base.name, - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)), + drm_dp_phy_name(dp_phy), ret ? "passed" : "failed", crtc_state->port_clock, crtc_state->lane_count); diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 7713c19042f3..03604a37931c 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -58,7 +58,6 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, struct drm_i915_private *i915 = to_i915(connector->base.dev); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N); int bpp, slots = -EINVAL; mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr); @@ -100,7 +99,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, adjusted_mode->crtc_clock, crtc_state->port_clock, &crtc_state->dp_m_n, - constant_n, crtc_state->fec_enable); + crtc_state->fec_enable); crtc_state->dp_m_n.tu = slots; return 0; @@ -566,7 +565,10 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base, drm_atomic_get_mst_payload_state(mst_state, connector->port)); - if (DISPLAY_VER(dev_priv) >= 12 && pipe_config->fec_enable) + if (DISPLAY_VER(dev_priv) >= 14 && pipe_config->fec_enable) + intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(trans), 0, + FECSTALL_DIS_DPTSTREAM_DPTTG); + else if (DISPLAY_VER(dev_priv) >= 12 && pipe_config->fec_enable) intel_de_rmw(dev_priv, CHICKEN_TRANS(trans), 0, FECSTALL_DIS_DPTSTREAM_DPTTG); diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c index cc6abe761f5e..8732b8722ed7 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c @@ -484,7 +484,7 @@ void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) enum dpio_phy rcomp_phy = phy_info->rcomp_phy; bool was_enabled; - 
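/*
 * Aside, not part of the patch: the FEC hunk a little above shows the
 * common shape for "same workaround bit, different register on a newer
 * platform". A hedged stand-alone sketch (offsets and the bit are
 * placeholders, not verified register values):
 */
#include <stdint.h>

#define CHICKEN_TRANS(t)	(0x420c0u + (t) * 4)	/* placeholder */
#define MTL_CHICKEN_TRANS(t)	(0x604e0u + (t) * 4)	/* placeholder */
#define FECSTALL_DIS		(1u << 23)		/* placeholder */

static void rmw(uint32_t reg, uint32_t clear, uint32_t set)
{
	/* a real driver would read-modify-write the MMIO register here */
	(void)reg; (void)clear; (void)set;
}

static void fecstall_wa(int display_ver, int fec_enable, int trans)
{
	if (!fec_enable || display_ver < 12)
		return;

	/* ver >= 14 (MTL) moved the transcoder chicken register */
	rmw(display_ver >= 14 ? MTL_CHICKEN_TRANS(trans) : CHICKEN_TRANS(trans),
	    0, FECSTALL_DIS);
}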
lockdep_assert_held(&dev_priv->power_domains.lock); + lockdep_assert_held(&dev_priv->display.power.domains.lock); was_enabled = true; if (rcomp_phy != -1) diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index 5262f16b45ac..b15ba78d64d6 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c @@ -938,12 +938,25 @@ static int hsw_crtc_compute_clock(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder = intel_get_crtc_new_encoder(state, crtc_state); + int ret; if (DISPLAY_VER(dev_priv) < 11 && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) return 0; - return intel_compute_shared_dplls(state, crtc, encoder); + ret = intel_compute_shared_dplls(state, crtc, encoder); + if (ret) + return ret; + + /* FIXME this is a mess */ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) + return 0; + + /* CRT dotclock is determined via other means */ + if (!crtc_state->has_pch_encoder) + crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); + + return 0; } static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state, @@ -969,8 +982,15 @@ static int dg2_crtc_compute_clock(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder = intel_get_crtc_new_encoder(state, crtc_state); + int ret; + + ret = intel_mpllb_calc_state(crtc_state, encoder); + if (ret) + return ret; + + crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); - return intel_mpllb_calc_state(crtc_state, encoder); + return 0; } static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor) @@ -991,7 +1011,7 @@ static void ilk_update_pll_dividers(struct intel_crtc_state *crtc_state, factor = 21; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if ((intel_panel_use_ssc(dev_priv) && - dev_priv->vbt.lvds_ssc_freq == 100000) || + dev_priv->display.vbt.lvds_ssc_freq == 100000) || (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev_priv))) factor = 25; @@ -1096,6 +1116,7 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; int refclk = 120000; + int ret; /* CPU eDP is the only output that doesn't need a PCH PLL of its own. 
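/*
 * Aside, not part of the patch: several hunks in this file now follow
 * "compute the PLL dividers, then derive the frequency the hardware
 * will actually produce and store that back" (port_clock = dpll.dot,
 * crtc_clock = intel_crtc_dotclock()), so later state checks compare
 * achieved clocks rather than requested ones. A hedged stand-alone
 * sketch with a toy divider model:
 */
struct toy_pll { int m, n, p; };

/* naive search minimizing |ref * m / (n * p) - target| */
static long toy_pll_compute(long refclk_khz, long target_khz, struct toy_pll *out)
{
	long best_err = -1;

	for (int n = 1; n <= 4; n++)
		for (int p = 1; p <= 8; p++) {
			long m = (target_khz * n * p + refclk_khz / 2) / refclk_khz;
			long freq, err;

			if (m < 1)
				m = 1;
			freq = refclk_khz * m / (n * p);
			err = freq > target_khz ? freq - target_khz
						: target_khz - freq;
			if (best_err < 0 || err < best_err) {
				best_err = err;
				*out = (struct toy_pll){ .m = (int)m, .n = n, .p = p };
			}
		}
	return best_err;
}

static void toy_crtc_compute_clock(long refclk_khz, long *crtc_clock_khz)
{
	struct toy_pll pll;

	toy_pll_compute(refclk_khz, *crtc_clock_khz, &pll);
	/* store what the dividers really deliver, as the patch does */
	*crtc_clock_khz = refclk_khz * pll.m / (pll.n * pll.p);
}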
*/ if (!crtc_state->has_pch_encoder) @@ -1105,8 +1126,8 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state, if (intel_panel_use_ssc(dev_priv)) { drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", - dev_priv->vbt.lvds_ssc_freq); - refclk = dev_priv->vbt.lvds_ssc_freq; + dev_priv->display.vbt.lvds_ssc_freq); + refclk = dev_priv->display.vbt.lvds_ssc_freq; } if (intel_is_dual_link_lvds(dev_priv)) { @@ -1132,7 +1153,14 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state, ilk_compute_dpll(crtc_state, &crtc_state->dpll, &crtc_state->dpll); - return intel_compute_shared_dplls(state, crtc, NULL); + ret = intel_compute_shared_dplls(state, crtc, NULL); + if (ret) + return ret; + + crtc_state->port_clock = crtc_state->dpll.dot; + crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); + + return ret; } static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state, @@ -1198,6 +1226,13 @@ static int chv_crtc_compute_clock(struct intel_atomic_state *state, chv_compute_dpll(crtc_state); + /* FIXME this is a mess */ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) + return 0; + + crtc_state->port_clock = crtc_state->dpll.dot; + crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); + return 0; } @@ -1217,6 +1252,13 @@ static int vlv_crtc_compute_clock(struct intel_atomic_state *state, vlv_compute_dpll(crtc_state); + /* FIXME this is a mess */ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) + return 0; + + crtc_state->port_clock = crtc_state->dpll.dot; + crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); + return 0; } @@ -1231,7 +1273,7 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { - refclk = dev_priv->vbt.lvds_ssc_freq; + refclk = dev_priv->display.vbt.lvds_ssc_freq; drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", refclk); @@ -1259,6 +1301,11 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state, i9xx_compute_dpll(crtc_state, &crtc_state->dpll, &crtc_state->dpll); + crtc_state->port_clock = crtc_state->dpll.dot; + /* FIXME this is a mess */ + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT)) + crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); + return 0; } @@ -1273,7 +1320,7 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { - refclk = dev_priv->vbt.lvds_ssc_freq; + refclk = dev_priv->display.vbt.lvds_ssc_freq; drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", refclk); @@ -1292,6 +1339,9 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state, i9xx_compute_dpll(crtc_state, &crtc_state->dpll, &crtc_state->dpll); + crtc_state->port_clock = crtc_state->dpll.dot; + crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); + return 0; } @@ -1306,7 +1356,7 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { - refclk = dev_priv->vbt.lvds_ssc_freq; + refclk = dev_priv->display.vbt.lvds_ssc_freq; drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", refclk); @@ -1325,6 +1375,11 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state, i9xx_compute_dpll(crtc_state, &crtc_state->dpll, 
&crtc_state->dpll); + crtc_state->port_clock = crtc_state->dpll.dot; + /* FIXME this is a mess */ + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT)) + crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); + return 0; } @@ -1339,7 +1394,7 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { - refclk = dev_priv->vbt.lvds_ssc_freq; + refclk = dev_priv->display.vbt.lvds_ssc_freq; drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", refclk); @@ -1360,6 +1415,9 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state, i8xx_compute_dpll(crtc_state, &crtc_state->dpll, &crtc_state->dpll); + crtc_state->port_clock = crtc_state->dpll.dot; + crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); + return 0; } @@ -1411,16 +1469,13 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state, drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state)); - if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll)) - return 0; - memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); if (!crtc_state->hw.enable) return 0; - ret = i915->dpll_funcs->crtc_compute_clock(state, crtc); + ret = i915->display.funcs.dpll->crtc_compute_clock(state, crtc); if (ret) { drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n", crtc->base.base.id, crtc->base.name); @@ -1439,17 +1494,15 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state, int ret; drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state)); + drm_WARN_ON(&i915->drm, !crtc_state->hw.enable && crtc_state->shared_dpll); - if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll)) - return 0; - - if (!crtc_state->hw.enable) + if (!crtc_state->hw.enable || crtc_state->shared_dpll) return 0; - if (!i915->dpll_funcs->crtc_get_shared_dpll) + if (!i915->display.funcs.dpll->crtc_get_shared_dpll) return 0; - ret = i915->dpll_funcs->crtc_get_shared_dpll(state, crtc); + ret = i915->display.funcs.dpll->crtc_get_shared_dpll(state, crtc); if (ret) { drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n", crtc->base.base.id, crtc->base.name); @@ -1463,23 +1516,23 @@ void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv) { if (IS_DG2(dev_priv)) - dev_priv->dpll_funcs = &dg2_dpll_funcs; + dev_priv->display.funcs.dpll = &dg2_dpll_funcs; else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv)) - dev_priv->dpll_funcs = &hsw_dpll_funcs; + dev_priv->display.funcs.dpll = &hsw_dpll_funcs; else if (HAS_PCH_SPLIT(dev_priv)) - dev_priv->dpll_funcs = &ilk_dpll_funcs; + dev_priv->display.funcs.dpll = &ilk_dpll_funcs; else if (IS_CHERRYVIEW(dev_priv)) - dev_priv->dpll_funcs = &chv_dpll_funcs; + dev_priv->display.funcs.dpll = &chv_dpll_funcs; else if (IS_VALLEYVIEW(dev_priv)) - dev_priv->dpll_funcs = &vlv_dpll_funcs; + dev_priv->display.funcs.dpll = &vlv_dpll_funcs; else if (IS_G4X(dev_priv)) - dev_priv->dpll_funcs = &g4x_dpll_funcs; + dev_priv->display.funcs.dpll = &g4x_dpll_funcs; else if (IS_PINEVIEW(dev_priv)) - dev_priv->dpll_funcs = &pnv_dpll_funcs; + dev_priv->display.funcs.dpll = &pnv_dpll_funcs; else if (DISPLAY_VER(dev_priv) != 2) - dev_priv->dpll_funcs = &i9xx_dpll_funcs; + dev_priv->display.funcs.dpll = &i9xx_dpll_funcs; else - dev_priv->dpll_funcs = &i8xx_dpll_funcs; + dev_priv->display.funcs.dpll = &i8xx_dpll_funcs; } static bool i9xx_has_pps(struct drm_i915_private *dev_priv) diff --git 
a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 118598c9a809..e5fb66a5dd02 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -113,8 +113,8 @@ intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv, enum intel_dpll_id i; /* Copy shared dpll state */ - for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { - struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i]; + for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) { + struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i]; shared_dpll[i] = pll->state; } @@ -149,7 +149,7 @@ struct intel_shared_dpll * intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv, enum intel_dpll_id id) { - return &dev_priv->dpll.shared_dplls[id]; + return &dev_priv->display.dpll.shared_dplls[id]; } /** @@ -164,11 +164,11 @@ enum intel_dpll_id intel_get_shared_dpll_id(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - long pll_idx = pll - dev_priv->dpll.shared_dplls; + long pll_idx = pll - dev_priv->display.dpll.shared_dplls; if (drm_WARN_ON(&dev_priv->drm, pll_idx < 0 || - pll_idx >= dev_priv->dpll.num_shared_dpll)) + pll_idx >= dev_priv->display.dpll.num_shared_dpll)) return -1; return pll_idx; @@ -245,7 +245,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state) if (drm_WARN_ON(&dev_priv->drm, pll == NULL)) return; - mutex_lock(&dev_priv->dpll.lock); + mutex_lock(&dev_priv->display.dpll.lock); old_mask = pll->active_mask; if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) || @@ -271,7 +271,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state) pll->on = true; out: - mutex_unlock(&dev_priv->dpll.lock); + mutex_unlock(&dev_priv->display.dpll.lock); } /** @@ -294,7 +294,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state) if (pll == NULL) return; - mutex_lock(&dev_priv->dpll.lock); + mutex_lock(&dev_priv->display.dpll.lock); if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask), "%s not used by [CRTC:%d:%s]\n", pll->info->name, crtc->base.base.id, crtc->base.name)) @@ -317,7 +317,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state) pll->on = false; out: - mutex_unlock(&dev_priv->dpll.lock); + mutex_unlock(&dev_priv->display.dpll.lock); } static struct intel_shared_dpll * @@ -336,7 +336,7 @@ intel_find_shared_dpll(struct intel_atomic_state *state, drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1)); for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) { - pll = &dev_priv->dpll.shared_dplls[i]; + pll = &dev_priv->display.dpll.shared_dplls[i]; /* Only want to check enabled timings first */ if (shared_dpll[i].pipe_mask == 0) { @@ -436,9 +436,9 @@ void intel_shared_dpll_swap_state(struct intel_atomic_state *state) if (!state->dpll_set) return; - for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { + for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) { struct intel_shared_dpll *pll = - &dev_priv->dpll.shared_dplls[i]; + &dev_priv->display.dpll.shared_dplls[i]; swap(pll->state, shared_dpll[i]); } @@ -537,7 +537,7 @@ static int ibx_get_dpll(struct intel_atomic_state *state, if (HAS_PCH_IBX(dev_priv)) { /* Ironlake PCH has a fixed PLL->PCH pipe mapping. 
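/*
 * Aside, not part of the patch: the enable/disable paths above track a
 * PLL's users in a bitmask of pipes under display.dpll.lock and only
 * touch the hardware on the first or last user. A hedged stand-alone
 * sketch of that reference counting (locking omitted):
 */
struct shared_pll {
	unsigned int active_mask;	/* one bit per pipe using the PLL */
	int on;
};

static void pll_enable(struct shared_pll *pll, unsigned int pipe_bit)
{
	unsigned int old_mask = pll->active_mask;

	pll->active_mask |= pipe_bit;
	if (old_mask)
		return;			/* already running for another pipe */

	/* first user: program and power up the PLL here */
	pll->on = 1;
}

static void pll_disable(struct shared_pll *pll, unsigned int pipe_bit)
{
	pll->active_mask &= ~pipe_bit;
	if (pll->active_mask)
		return;			/* other pipes still depend on it */

	/* last user gone: power the PLL down here */
	pll->on = 0;
}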
*/ i = (enum intel_dpll_id) crtc->pipe; - pll = &dev_priv->dpll.shared_dplls[i]; + pll = &dev_priv->display.dpll.shared_dplls[i]; drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n", @@ -905,37 +905,6 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */, *r2_out = best.r2; } -static int -hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - unsigned int p, n2, r2; - - hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p); - - crtc_state->dpll_hw_state.wrpll = - WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL | - WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | - WRPLL_DIVIDER_POST(p); - - return 0; -} - -static struct intel_shared_dpll * -hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - - return intel_find_shared_dpll(state, crtc, - &crtc_state->dpll_hw_state, - BIT(DPLL_ID_WRPLL2) | - BIT(DPLL_ID_WRPLL1)); -} - static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state) @@ -948,7 +917,7 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, case WRPLL_REF_SPECIAL_HSW: /* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */ if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) { - refclk = dev_priv->dpll.ref_clks.nssc; + refclk = dev_priv->display.dpll.ref_clks.nssc; break; } fallthrough; @@ -958,7 +927,7 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, * code only cares about 5% accuracy, and spread is a max of * 0.5% downspread. */ - refclk = dev_priv->dpll.ref_clks.ssc; + refclk = dev_priv->display.dpll.ref_clks.ssc; break; case WRPLL_REF_LCPLL: refclk = 2700000; @@ -977,6 +946,41 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, } static int +hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + unsigned int p, n2, r2; + + hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p); + + crtc_state->dpll_hw_state.wrpll = + WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL | + WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | + WRPLL_DIVIDER_POST(p); + + crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL, + &crtc_state->dpll_hw_state); + + return 0; +} + +static struct intel_shared_dpll * +hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + return intel_find_shared_dpll(state, crtc, + &crtc_state->dpll_hw_state, + BIT(DPLL_ID_WRPLL2) | + BIT(DPLL_ID_WRPLL1)); +} + +static int hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); @@ -1145,12 +1149,12 @@ static int hsw_get_dpll(struct intel_atomic_state *state, static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915) { - i915->dpll.ref_clks.ssc = 135000; + i915->display.dpll.ref_clks.ssc = 135000; /* Non-SSC is only used on non-ULT HSW. 
*/ if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT) - i915->dpll.ref_clks.nssc = 24000; + i915->display.dpll.ref_clks.nssc = 24000; else - i915->dpll.ref_clks.nssc = 135000; + i915->display.dpll.ref_clks.nssc = 135000; } static void hsw_dump_hw_state(struct drm_i915_private *dev_priv, @@ -1618,48 +1622,11 @@ skip_remaining_dividers: return 0; } -static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - struct skl_wrpll_params wrpll_params = {}; - u32 ctrl1, cfgcr1, cfgcr2; - int ret; - - /* - * See comment in intel_dpll_hw_state to understand why we always use 0 - * as the DPLL id in this function. - */ - ctrl1 = DPLL_CTRL1_OVERRIDE(0); - - ctrl1 |= DPLL_CTRL1_HDMI_MODE(0); - - ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000, - i915->dpll.ref_clks.nssc, &wrpll_params); - if (ret) - return ret; - - cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE | - DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) | - wrpll_params.dco_integer; - - cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) | - DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) | - DPLL_CFGCR2_KDIV(wrpll_params.kdiv) | - DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | - wrpll_params.central_freq; - - crtc_state->dpll_hw_state.ctrl1 = ctrl1; - crtc_state->dpll_hw_state.cfgcr1 = cfgcr1; - crtc_state->dpll_hw_state.cfgcr2 = cfgcr2; - - return 0; -} - static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state) { - int ref_clock = i915->dpll.ref_clks.nssc; + int ref_clock = i915->display.dpll.ref_clks.nssc; u32 p0, p1, p2, dco_freq; p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK; @@ -1726,6 +1693,46 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915, return dco_freq / (p0 * p1 * p2 * 5); } +static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct skl_wrpll_params wrpll_params = {}; + u32 ctrl1, cfgcr1, cfgcr2; + int ret; + + /* + * See comment in intel_dpll_hw_state to understand why we always use 0 + * as the DPLL id in this function. 
+ */ + ctrl1 = DPLL_CTRL1_OVERRIDE(0); + + ctrl1 |= DPLL_CTRL1_HDMI_MODE(0); + + ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000, + i915->display.dpll.ref_clks.nssc, &wrpll_params); + if (ret) + return ret; + + cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE | + DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) | + wrpll_params.dco_integer; + + cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) | + DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) | + DPLL_CFGCR2_KDIV(wrpll_params.kdiv) | + DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | + wrpll_params.central_freq; + + crtc_state->dpll_hw_state.ctrl1 = ctrl1; + crtc_state->dpll_hw_state.cfgcr1 = cfgcr1; + crtc_state->dpll_hw_state.cfgcr2 = cfgcr2; + + crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL, + &crtc_state->dpll_hw_state); + + return 0; +} + static int skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { @@ -1858,7 +1865,7 @@ static int skl_ddi_pll_get_freq(struct drm_i915_private *i915, static void skl_update_dpll_ref_clks(struct drm_i915_private *i915) { /* No SSC ref */ - i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref; + i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref; } static void skl_dump_hw_state(struct drm_i915_private *dev_priv, @@ -2171,7 +2178,7 @@ static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state, } } - chv_calc_dpll_params(i915->dpll.ref_clks.nssc, clk_div); + chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div); drm_WARN_ON(&i915->drm, clk_div->vco == 0 || clk_div->dot != crtc_state->port_clock); @@ -2245,6 +2252,23 @@ static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state, return 0; } +static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state) +{ + struct dpll clock; + + clock.m1 = 2; + clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22; + if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE) + clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2); + clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1); + clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0); + clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0); + + return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock); +} + static int bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { @@ -2258,28 +2282,20 @@ bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) static int bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); struct dpll clk_div = {}; + int ret; bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div); - return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div); -} - -static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915, - const struct intel_shared_dpll *pll, - const struct intel_dpll_hw_state *pll_state) -{ - struct dpll clock; + ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div); + if (ret) + return ret; - clock.m1 = 2; - clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22; - if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE) - clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2); - clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1); - clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0); - clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0); + crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL, + &crtc_state->dpll_hw_state); - return 
chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock); + return 0; } static int bxt_compute_dpll(struct intel_atomic_state *state, @@ -2324,8 +2340,8 @@ static int bxt_get_dpll(struct intel_atomic_state *state, static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915) { - i915->dpll.ref_clks.ssc = 100000; - i915->dpll.ref_clks.nssc = 100000; + i915->display.dpll.ref_clks.ssc = 100000; + i915->display.dpll.ref_clks.nssc = 100000; /* DSI non-SSC ref 19.2MHz */ } @@ -2468,7 +2484,7 @@ ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915) return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) && IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) && - i915->dpll.ref_clks.nssc == 38400; + i915->display.dpll.ref_clks.nssc == 38400; } struct icl_combo_pll_params { @@ -2562,7 +2578,7 @@ static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state, { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); const struct icl_combo_pll_params *params = - dev_priv->dpll.ref_clks.nssc == 24000 ? + dev_priv->display.dpll.ref_clks.nssc == 24000 ? icl_dp_combo_pll_24MHz_values : icl_dp_combo_pll_19_2MHz_values; int clock = crtc_state->port_clock; @@ -2585,9 +2601,9 @@ static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); if (DISPLAY_VER(dev_priv) >= 12) { - switch (dev_priv->dpll.ref_clks.nssc) { + switch (dev_priv->display.dpll.ref_clks.nssc) { default: - MISSING_CASE(dev_priv->dpll.ref_clks.nssc); + MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc); fallthrough; case 19200: case 38400: @@ -2598,9 +2614,9 @@ static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, break; } } else { - switch (dev_priv->dpll.ref_clks.nssc) { + switch (dev_priv->display.dpll.ref_clks.nssc) { default: - MISSING_CASE(dev_priv->dpll.ref_clks.nssc); + MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc); fallthrough; case 19200: case 38400: @@ -2630,7 +2646,7 @@ static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915, static int icl_wrpll_ref_clock(struct drm_i915_private *i915) { - int ref_clock = i915->dpll.ref_clks.nssc; + int ref_clock = i915->display.dpll.ref_clks.nssc; /* * For ICL+, the spec states: if reference frequency is 38.4, @@ -2769,8 +2785,8 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915, else pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400; - if (i915->vbt.override_afc_startup) - pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->vbt.override_afc_startup_val); + if (i915->display.vbt.override_afc_startup) + pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val); } static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, @@ -2857,7 +2873,7 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, struct intel_dpll_hw_state *pll_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - int refclk_khz = dev_priv->dpll.ref_clks.nssc; + int refclk_khz = dev_priv->display.dpll.ref_clks.nssc; int clock = crtc_state->port_clock; u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac; u32 iref_ndiv, iref_trim, iref_pulse_w; @@ -2965,8 +2981,8 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, DKL_PLL_DIV0_PROP_COEFF(prop_coeff) | DKL_PLL_DIV0_FBPREDIV(m1div) | DKL_PLL_DIV0_FBDIV_INT(m2div_int); - if (dev_priv->vbt.override_afc_startup) { - u8 val = 
dev_priv->vbt.override_afc_startup_val; + if (dev_priv->display.vbt.override_afc_startup) { + u8 val = dev_priv->display.vbt.override_afc_startup_val; pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val); } @@ -3063,7 +3079,7 @@ static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv, u32 m1, m2_int, m2_frac, div1, div2, ref_clock; u64 tmp; - ref_clock = dev_priv->dpll.ref_clks.nssc; + ref_clock = dev_priv->display.dpll.ref_clks.nssc; if (DISPLAY_VER(dev_priv) >= 12) { m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK; @@ -3197,6 +3213,12 @@ static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state, icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state); + /* this is mainly for the fastset check */ + icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT); + + crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL, + &port_dpll->hw_state); + return 0; } @@ -3282,6 +3304,12 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state, if (ret) return ret; + /* this is mainly for the fastset check */ + icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY); + + crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL, + &port_dpll->hw_state); + return 0; } @@ -3440,7 +3468,7 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv, hw_state->mg_pll_tdc_coldst_bias = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port)); - if (dev_priv->dpll.ref_clks.nssc == 38400) { + if (dev_priv->display.dpll.ref_clks.nssc == 38400) { hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART; hw_state->mg_pll_bias_mask = 0; } else { @@ -3502,7 +3530,7 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv, hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port)); val = DKL_PLL_DIV0_MASK; - if (dev_priv->vbt.override_afc_startup) + if (dev_priv->display.vbt.override_afc_startup) val |= DKL_PLL_DIV0_AFC_STARTUP_MASK; hw_state->mg_pll_div0 &= val; @@ -3566,7 +3594,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, TGL_DPLL_CFGCR0(id)); hw_state->cfgcr1 = intel_de_read(dev_priv, TGL_DPLL_CFGCR1(id)); - if (dev_priv->vbt.override_afc_startup) { + if (dev_priv->display.vbt.override_afc_startup) { hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id)); hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK; } @@ -3638,9 +3666,9 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv, intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0); intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1); - drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->vbt.override_afc_startup && + drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup && !i915_mmio_reg_valid(div0_reg)); - if (dev_priv->vbt.override_afc_startup && + if (dev_priv->display.vbt.override_afc_startup && i915_mmio_reg_valid(div0_reg)) intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0); @@ -3732,7 +3760,7 @@ static void dkl_pll_write(struct drm_i915_private *dev_priv, intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val); val = DKL_PLL_DIV0_MASK; - if (dev_priv->vbt.override_afc_startup) + if (dev_priv->display.vbt.override_afc_startup) val |= DKL_PLL_DIV0_AFC_STARTUP_MASK; intel_de_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val, hw_state->mg_pll_div0); @@ -3967,7 +3995,7 @@ static void mg_pll_disable(struct drm_i915_private *dev_priv, static void icl_update_dpll_ref_clks(struct drm_i915_private *i915) { /* No SSC ref */ - i915->dpll.ref_clks.nssc = 
i915->cdclk.hw.ref; + i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref; } static void icl_dump_hw_state(struct drm_i915_private *dev_priv, @@ -4192,22 +4220,24 @@ void intel_shared_dpll_init(struct drm_i915_private *dev_priv) dpll_mgr = &pch_pll_mgr; if (!dpll_mgr) { - dev_priv->dpll.num_shared_dpll = 0; + dev_priv->display.dpll.num_shared_dpll = 0; return; } dpll_info = dpll_mgr->dpll_info; for (i = 0; dpll_info[i].name; i++) { + if (drm_WARN_ON(&dev_priv->drm, + i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls))) + break; + drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id); - dev_priv->dpll.shared_dplls[i].info = &dpll_info[i]; + dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i]; } - dev_priv->dpll.mgr = dpll_mgr; - dev_priv->dpll.num_shared_dpll = i; - mutex_init(&dev_priv->dpll.lock); - - BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS); + dev_priv->display.dpll.mgr = dpll_mgr; + dev_priv->display.dpll.num_shared_dpll = i; + mutex_init(&dev_priv->display.dpll.lock); } /** @@ -4229,7 +4259,7 @@ int intel_compute_shared_dplls(struct intel_atomic_state *state, struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr; + const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr; if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr)) return -EINVAL; @@ -4262,7 +4292,7 @@ int intel_reserve_shared_dplls(struct intel_atomic_state *state, struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr; + const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr; if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr)) return -EINVAL; @@ -4285,7 +4315,7 @@ void intel_release_shared_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr; + const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr; /* * FIXME: this function is called for every platform having a @@ -4314,7 +4344,7 @@ void intel_update_active_dpll(struct intel_atomic_state *state, struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr; + const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr; if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr)) return; @@ -4385,16 +4415,16 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915, void intel_dpll_update_ref_clks(struct drm_i915_private *i915) { - if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks) - i915->dpll.mgr->update_ref_clks(i915); + if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks) + i915->display.dpll.mgr->update_ref_clks(i915); } void intel_dpll_readout_hw_state(struct drm_i915_private *i915) { int i; - for (i = 0; i < i915->dpll.num_shared_dpll; i++) - readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]); + for (i = 0; i < i915->display.dpll.num_shared_dpll; i++) + readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]); } static void sanitize_dpll_state(struct drm_i915_private *i915, @@ -4420,8 +4450,8 @@ void intel_dpll_sanitize_state(struct drm_i915_private *i915) { int i; - for (i = 0; i < i915->dpll.num_shared_dpll; i++) - sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]); + for (i = 0; i < i915->display.dpll.num_shared_dpll; i++) + sanitize_dpll_state(i915, 
&i915->display.dpll.shared_dplls[i]); } /** @@ -4434,8 +4464,8 @@ void intel_dpll_sanitize_state(struct drm_i915_private *i915) void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) { - if (dev_priv->dpll.mgr) { - dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state); + if (dev_priv->display.dpll.mgr) { + dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state); } else { /* fallback for platforms that don't use the shared dpll * infrastructure @@ -4533,7 +4563,7 @@ void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915) { int i; - for (i = 0; i < i915->dpll.num_shared_dpll; i++) - verify_single_dpll_state(i915, &i915->dpll.shared_dplls[i], + for (i = 0; i < i915->display.dpll.num_shared_dpll; i++) + verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i], NULL, NULL); } diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index c4affcb216fd..fc9c3e41c333 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -9,6 +9,36 @@ #include "i915_drv.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_dsb.h" + +struct i915_vma; + +enum dsb_id { + INVALID_DSB = -1, + DSB1, + DSB2, + DSB3, + MAX_DSB_PER_PIPE +}; + +struct intel_dsb { + enum dsb_id id; + u32 *cmd_buf; + struct i915_vma *vma; + + /* + * free_pos will point to the first free entry position + * and help in calculating the tail of the command buffer. + */ + int free_pos; + + /* + * ins_start_offset will help to store the start address of the dsb + * instruction and help in identifying the batch of auto-increment + * registers. + */ + u32 ins_start_offset; +};
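Review note: with the struct moved out of the header (above, with the comment typos fixed in the new copy), intel_dsb becomes opaque outside intel_dsb.c. Conceptually it is a bump-allocated dword buffer; here is a hedged standalone sketch of how free_pos and ins_start_offset could cooperate when emitting instructions. The opcode encoding is invented for illustration and is not the real DSB instruction format.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_BUF_DWORDS 1024

struct example_dsb {
	uint32_t cmd_buf[EXAMPLE_BUF_DWORDS];
	int free_pos;		/* first free dword slot == buffer tail */
	uint32_t ins_start_offset;	/* dword where current instruction starts */
};

/*
 * Emit a register write: remember where this instruction begins so a
 * follow-up write to an auto-incrementing register could be appended
 * to the same instruction instead of opening a new one.
 */
static void example_emit_reg_write(struct example_dsb *dsb,
				   uint32_t reg, uint32_t val)
{
	dsb->ins_start_offset = dsb->free_pos;
	dsb->cmd_buf[dsb->free_pos++] = (0x1u << 24) | (reg & 0xffffff); /* fake opcode */
	dsb->cmd_buf[dsb->free_pos++] = val;
}

int main(void)
{
	struct example_dsb dsb = { .free_pos = 0 };

	example_emit_reg_write(&dsb, 0x70180, 0x12345678);
	printf("tail at dword %d, instruction began at dword %u\n",
	       dsb.free_pos, dsb.ins_start_offset);
	return 0;
}

#define DSB_BUF_SIZE (2 * PAGE_SIZE) diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h index 6cb9c580cdca..74dd2b3343bb 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.h +++ b/drivers/gpu/drm/i915/display/intel_dsb.h @@ -11,34 +11,6 @@ #include "i915_reg_defs.h" struct intel_crtc_state; -struct i915_vma; - -enum dsb_id { - INVALID_DSB = -1, - DSB1, - DSB2, - DSB3, - MAX_DSB_PER_PIPE -}; - -struct intel_dsb { - enum dsb_id id; - u32 *cmd_buf; - struct i915_vma *vma; - - /* - * free_pos will point the first free entry position - * and help in calculating tail of command buffer. - */ - int free_pos; - - /* - * ins_start_offset will help to store start address of the dsb - * instuction and help in identifying the batch of auto-increment - * register. 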
- */ - u32 ins_start_offset; -}; void intel_dsb_prepare(struct intel_crtc_state *crtc_state); void intel_dsb_cleanup(struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c index 35e121cd226c..5efdd471ac2b 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.c +++ b/drivers/gpu/drm/i915/display/intel_dsi.c @@ -106,7 +106,7 @@ intel_dsi_get_panel_orientation(struct intel_connector *connector) if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) return orientation; - orientation = dev_priv->vbt.orientation; + orientation = dev_priv->display.vbt.orientation; if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) return orientation; diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h index eafef0a87fea..ce80bd8be519 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.h +++ b/drivers/gpu/drm/i915/display/intel_dsi.h @@ -89,9 +89,6 @@ struct intel_dsi { u8 escape_clk_div; u8 dual_link; - u16 dcs_backlight_ports; - u16 dcs_cabc_ports; - /* RGB or BGR */ bool bgr_enabled; diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c index 1bc7118c56a2..20e466d843ce 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c @@ -53,7 +53,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector, enum pipe unused enum port port; size_t len = panel->backlight.max > U8_MAX ? 2 : 1; - for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { + for_each_dsi_port(port, panel->vbt.dsi.bl_ports) { dsi_device = intel_dsi->dsi_hosts[port]->device; mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS, &data, len); @@ -80,7 +80,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 data[1] = level; } - for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { + for_each_dsi_port(port, panel->vbt.dsi.bl_ports) { dsi_device = intel_dsi->dsi_hosts[port]->device; mode_flags = dsi_device->mode_flags; dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM; @@ -93,12 +93,13 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 static void dcs_disable_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder)); + struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel; struct mipi_dsi_device *dsi_device; enum port port; dcs_set_backlight(conn_state, 0); - for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) { + for_each_dsi_port(port, panel->vbt.dsi.cabc_ports) { u8 cabc = POWER_SAVE_OFF; dsi_device = intel_dsi->dsi_hosts[port]->device; @@ -106,7 +107,7 @@ static void dcs_disable_backlight(const struct drm_connector_state *conn_state, &cabc, sizeof(cabc)); } - for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { + for_each_dsi_port(port, panel->vbt.dsi.bl_ports) { u8 ctrl = 0; dsi_device = intel_dsi->dsi_hosts[port]->device; @@ -127,10 +128,11 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state, u32 level) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder)); + struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel; struct mipi_dsi_device *dsi_device; enum port port; - for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { + 
for_each_dsi_port(port, panel->vbt.dsi.bl_ports) { u8 ctrl = 0; dsi_device = intel_dsi->dsi_hosts[port]->device; @@ -146,7 +148,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state, &ctrl, sizeof(ctrl)); } - for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) { + for_each_dsi_port(port, panel->vbt.dsi.cabc_ports) { u8 cabc = POWER_SAVE_MEDIUM; dsi_device = intel_dsi->dsi_hosts[port]->device; diff --git a/drivers/gpu/drm/i915/display/intel_dvo_dev.h b/drivers/gpu/drm/i915/display/intel_dvo_dev.h index d96c3cc46e50..50205f064d93 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo_dev.h +++ b/drivers/gpu/drm/i915/display/intel_dvo_dev.h @@ -75,8 +75,8 @@ struct intel_dvo_dev_ops { * * \return MODE_OK if the mode is valid, or another MODE_* otherwise. */ - int (*mode_valid)(struct intel_dvo_device *dvo, - struct drm_display_mode *mode); + enum drm_mode_status (*mode_valid)(struct intel_dvo_device *dvo, + struct drm_display_mode *mode); /* * Callback for preparing mode changes on an output diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 16537830ccf0..f38175304928 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -55,11 +55,11 @@ #define for_each_fbc_id(__dev_priv, __fbc_id) \ for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \ - for_each_if(INTEL_INFO(__dev_priv)->display.fbc_mask & BIT(__fbc_id)) + for_each_if(RUNTIME_INFO(__dev_priv)->fbc_mask & BIT(__fbc_id)) #define for_each_intel_fbc(__dev_priv, __fbc, __fbc_id) \ for_each_fbc_id((__dev_priv), (__fbc_id)) \ - for_each_if((__fbc) = (__dev_priv)->fbc[(__fbc_id)]) + for_each_if((__fbc) = (__dev_priv)->display.fbc[(__fbc_id)]) struct intel_fbc_funcs { void (*activate)(struct intel_fbc *fbc); @@ -1098,6 +1098,12 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, return 0; } + /* Wa_14016291713 */ + if (IS_DISPLAY_VER(i915, 12, 13) && crtc_state->has_psr) { + plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)"; + return 0; + } + if (!pixel_format_is_valid(plane_state)) { plane_state->no_fbc_reason = "pixel format not supported"; return 0; @@ -1704,17 +1710,17 @@ void intel_fbc_init(struct drm_i915_private *i915) enum intel_fbc_id fbc_id; if (!drm_mm_initialized(&i915->mm.stolen)) - mkwrite_device_info(i915)->display.fbc_mask = 0; + RUNTIME_INFO(i915)->fbc_mask = 0; if (need_fbc_vtd_wa(i915)) - mkwrite_device_info(i915)->display.fbc_mask = 0; + RUNTIME_INFO(i915)->fbc_mask = 0; i915->params.enable_fbc = intel_sanitize_fbc_option(i915); drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n", i915->params.enable_fbc); for_each_fbc_id(i915, fbc_id) - i915->fbc[fbc_id] = intel_fbc_create(i915, fbc_id); + i915->display.fbc[fbc_id] = intel_fbc_create(i915, fbc_id); } /** @@ -1834,7 +1840,7 @@ void intel_fbc_debugfs_register(struct drm_i915_private *i915) struct drm_minor *minor = i915->drm.primary; struct intel_fbc *fbc; - fbc = i915->fbc[INTEL_FBC_A]; + fbc = i915->display.fbc[INTEL_FBC_A]; if (fbc) intel_fbc_debugfs_add(fbc, minor->debugfs_root); } diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h index db60143295ec..4adb98afe6ff 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.h +++ b/drivers/gpu/drm/i915/display/intel_fbc.h @@ -19,6 +19,7 @@ struct intel_plane_state; enum intel_fbc_id { INTEL_FBC_A, + INTEL_FBC_B, I915_MAX_FBCS, }; diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c 
b/drivers/gpu/drm/i915/display/intel_fbdev.c index 1922f62d04c0..112aa0447a0d 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -210,6 +210,12 @@ static int intelfb_create(struct drm_fb_helper *helper, struct drm_i915_gem_object *obj; int ret; + mutex_lock(&ifbdev->hpd_lock); + ret = ifbdev->hpd_suspended ? -EAGAIN : 0; + mutex_unlock(&ifbdev->hpd_lock); + if (ret) + return ret; + if (intel_fb && (sizes->fb_width > intel_fb->base.width || sizes->fb_height > intel_fb->base.height)) { @@ -500,7 +506,7 @@ static void intel_fbdev_suspend_worker(struct work_struct *work) { intel_fbdev_set_suspend(&container_of(work, struct drm_i915_private, - fbdev_suspend_work)->drm, + display.fbdev.suspend_work)->drm, FBINFO_STATE_RUNNING, true); } @@ -530,8 +536,8 @@ int intel_fbdev_init(struct drm_device *dev) return ret; } - dev_priv->fbdev = ifbdev; - INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker); + dev_priv->display.fbdev.fbdev = ifbdev; + INIT_WORK(&dev_priv->display.fbdev.suspend_work, intel_fbdev_suspend_worker); return 0; } @@ -548,7 +554,7 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie) void intel_fbdev_initial_config_async(struct drm_device *dev) { - struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; + struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev; if (!ifbdev) return; @@ -568,12 +574,13 @@ static void intel_fbdev_sync(struct intel_fbdev *ifbdev) void intel_fbdev_unregister(struct drm_i915_private *dev_priv) { - struct intel_fbdev *ifbdev = dev_priv->fbdev; + struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev; if (!ifbdev) return; - cancel_work_sync(&dev_priv->fbdev_suspend_work); + intel_fbdev_set_suspend(&dev_priv->drm, FBINFO_STATE_SUSPENDED, true); + if (!current_is_async()) intel_fbdev_sync(ifbdev); @@ -582,7 +589,7 @@ void intel_fbdev_unregister(struct drm_i915_private *dev_priv) void intel_fbdev_fini(struct drm_i915_private *dev_priv) { - struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->fbdev); + struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->display.fbdev.fbdev); if (!ifbdev) return; @@ -596,7 +603,7 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv) */ static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state) { - struct intel_fbdev *ifbdev = i915->fbdev; + struct intel_fbdev *ifbdev = i915->display.fbdev.fbdev; bool send_hpd = false; mutex_lock(&ifbdev->hpd_lock); @@ -614,11 +621,11 @@ static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) { struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_fbdev *ifbdev = dev_priv->fbdev; + struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev; struct fb_info *info; if (!ifbdev || !ifbdev->vma) - return; + goto set_suspend; info = ifbdev->helper.fbdev; @@ -631,7 +638,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous * ourselves, so only flush outstanding work upon suspend! */ if (state != FBINFO_STATE_RUNNING) - flush_work(&dev_priv->fbdev_suspend_work); + flush_work(&dev_priv->display.fbdev.suspend_work); console_lock(); } else { @@ -645,7 +652,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous /* Don't block our own workqueue as this can * be run in parallel with other i915.ko tasks. 
*/ - schedule_work(&dev_priv->fbdev_suspend_work); + schedule_work(&dev_priv->display.fbdev.suspend_work); return; } } @@ -661,12 +668,13 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous drm_fb_helper_set_suspend(&ifbdev->helper, state); console_unlock(); +set_suspend: intel_fbdev_hpd_set_suspend(dev_priv, state); } void intel_fbdev_output_poll_changed(struct drm_device *dev) { - struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; + struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev; bool send_hpd; if (!ifbdev) @@ -685,7 +693,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev) void intel_fbdev_restore_mode(struct drm_device *dev) { - struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; + struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev; if (!ifbdev) return; diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c index 67d2484afbaa..7f47e5c85c81 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.c +++ b/drivers/gpu/drm/i915/display/intel_fdi.c @@ -113,7 +113,7 @@ void intel_fdi_link_train(struct intel_crtc *crtc, { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - dev_priv->fdi_funcs->fdi_link_train(crtc, crtc_state); + dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state); } /* units of 100MHz */ @@ -210,14 +210,14 @@ void intel_fdi_pll_freq_update(struct drm_i915_private *i915) u32 fdi_pll_clk = intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK; - i915->fdi_pll_freq = (fdi_pll_clk + 2) * 10000; + i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000; } else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) { - i915->fdi_pll_freq = 270000; + i915->display.fdi.pll_freq = 270000; } else { return; } - drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->fdi_pll_freq); + drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq); } int intel_fdi_link_freq(struct drm_i915_private *i915, @@ -226,7 +226,7 @@ int intel_fdi_link_freq(struct drm_i915_private *i915, if (HAS_DDI(i915)) return pipe_config->port_clock; /* SPLL */ else - return i915->fdi_pll_freq; + return i915->display.fdi.pll_freq; } int ilk_fdi_compute_config(struct intel_crtc *crtc, @@ -256,7 +256,7 @@ retry: pipe_config->fdi_lanes = lane; intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, - link_bw, &pipe_config->fdi_m_n, false, false); + link_bw, &pipe_config->fdi_m_n, false); ret = ilk_check_fdi_lanes(dev, crtc->pipe, pipe_config); if (ret == -EDEADLK) @@ -789,7 +789,7 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); /* Enable the PCH Receiver FDI PLL */ - rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE | + rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE | FDI_RX_PLL_ENABLE | FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); @@ -1066,11 +1066,11 @@ void intel_fdi_init_hook(struct drm_i915_private *dev_priv) { if (IS_IRONLAKE(dev_priv)) { - dev_priv->fdi_funcs = &ilk_funcs; + dev_priv->display.funcs.fdi = &ilk_funcs; } else if (IS_SANDYBRIDGE(dev_priv)) { - dev_priv->fdi_funcs = &gen6_funcs; + dev_priv->display.funcs.fdi = &gen6_funcs; } else if (IS_IVYBRIDGE(dev_priv)) { /* FIXME: detect B0+ stepping and use auto training */ - dev_priv->fdi_funcs = &ivb_funcs; + dev_priv->display.funcs.fdi = &ivb_funcs; } } diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c 
b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index 791248f812aa..d80e3e8a9b01 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -81,9 +81,9 @@ static void frontbuffer_flush(struct drm_i915_private *i915, enum fb_op_origin origin) { /* Delay flushing when rings are still busy.*/ - spin_lock(&i915->fb_tracking.lock); - frontbuffer_bits &= ~i915->fb_tracking.busy_bits; - spin_unlock(&i915->fb_tracking.lock); + spin_lock(&i915->display.fb_tracking.lock); + frontbuffer_bits &= ~i915->display.fb_tracking.busy_bits; + spin_unlock(&i915->display.fb_tracking.lock); if (!frontbuffer_bits) return; @@ -111,11 +111,11 @@ static void frontbuffer_flush(struct drm_i915_private *i915, void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915, unsigned frontbuffer_bits) { - spin_lock(&i915->fb_tracking.lock); - i915->fb_tracking.flip_bits |= frontbuffer_bits; + spin_lock(&i915->display.fb_tracking.lock); + i915->display.fb_tracking.flip_bits |= frontbuffer_bits; /* Remove stale busy bits due to the old buffer. */ - i915->fb_tracking.busy_bits &= ~frontbuffer_bits; - spin_unlock(&i915->fb_tracking.lock); + i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits; + spin_unlock(&i915->display.fb_tracking.lock); } /** @@ -131,11 +131,11 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915, void intel_frontbuffer_flip_complete(struct drm_i915_private *i915, unsigned frontbuffer_bits) { - spin_lock(&i915->fb_tracking.lock); + spin_lock(&i915->display.fb_tracking.lock); /* Mask any cancelled flips. */ - frontbuffer_bits &= i915->fb_tracking.flip_bits; - i915->fb_tracking.flip_bits &= ~frontbuffer_bits; - spin_unlock(&i915->fb_tracking.lock); + frontbuffer_bits &= i915->display.fb_tracking.flip_bits; + i915->display.fb_tracking.flip_bits &= ~frontbuffer_bits; + spin_unlock(&i915->display.fb_tracking.lock); if (frontbuffer_bits) frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP); @@ -155,10 +155,10 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *i915, void intel_frontbuffer_flip(struct drm_i915_private *i915, unsigned frontbuffer_bits) { - spin_lock(&i915->fb_tracking.lock); + spin_lock(&i915->display.fb_tracking.lock); /* Remove stale busy bits due to the old buffer. */ - i915->fb_tracking.busy_bits &= ~frontbuffer_bits; - spin_unlock(&i915->fb_tracking.lock); + i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits; + spin_unlock(&i915->display.fb_tracking.lock); frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP); } @@ -170,10 +170,10 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front, struct drm_i915_private *i915 = to_i915(front->obj->base.dev); if (origin == ORIGIN_CS) { - spin_lock(&i915->fb_tracking.lock); - i915->fb_tracking.busy_bits |= frontbuffer_bits; - i915->fb_tracking.flip_bits &= ~frontbuffer_bits; - spin_unlock(&i915->fb_tracking.lock); + spin_lock(&i915->display.fb_tracking.lock); + i915->display.fb_tracking.busy_bits |= frontbuffer_bits; + i915->display.fb_tracking.flip_bits &= ~frontbuffer_bits; + spin_unlock(&i915->display.fb_tracking.lock); } trace_intel_frontbuffer_invalidate(frontbuffer_bits, origin); @@ -191,11 +191,11 @@ void __intel_fb_flush(struct intel_frontbuffer *front, struct drm_i915_private *i915 = to_i915(front->obj->base.dev); if (origin == ORIGIN_CS) { - spin_lock(&i915->fb_tracking.lock); + spin_lock(&i915->display.fb_tracking.lock); /* Filter out new bits since rendering started. 
*/ - frontbuffer_bits &= i915->fb_tracking.busy_bits; - i915->fb_tracking.busy_bits &= ~frontbuffer_bits; - spin_unlock(&i915->fb_tracking.lock); + frontbuffer_bits &= i915->display.fb_tracking.busy_bits; + i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits; + spin_unlock(&i915->display.fb_tracking.lock); } if (frontbuffer_bits) @@ -221,7 +221,7 @@ static void frontbuffer_retire(struct i915_active *ref) } static void frontbuffer_release(struct kref *ref) - __releases(&to_i915(front->obj->base.dev)->fb_tracking.lock) + __releases(&to_i915(front->obj->base.dev)->display.fb_tracking.lock) { struct intel_frontbuffer *front = container_of(ref, typeof(*front), ref); @@ -238,7 +238,7 @@ static void frontbuffer_release(struct kref *ref) spin_unlock(&obj->vma.lock); RCU_INIT_POINTER(obj->frontbuffer, NULL); - spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock); + spin_unlock(&to_i915(obj->base.dev)->display.fb_tracking.lock); i915_active_fini(&front->write); @@ -268,7 +268,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj) frontbuffer_retire, I915_ACTIVE_RETIRE_SLEEPS); - spin_lock(&i915->fb_tracking.lock); + spin_lock(&i915->display.fb_tracking.lock); if (rcu_access_pointer(obj->frontbuffer)) { kfree(front); front = rcu_dereference_protected(obj->frontbuffer, true); @@ -277,7 +277,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj) i915_gem_object_get(obj); rcu_assign_pointer(obj->frontbuffer, front); } - spin_unlock(&i915->fb_tracking.lock); + spin_unlock(&i915->display.fb_tracking.lock); return front; } @@ -286,7 +286,7 @@ void intel_frontbuffer_put(struct intel_frontbuffer *front) { kref_put_lock(&front->ref, frontbuffer_release, - &to_i915(front->obj->base.dev)->fb_tracking.lock); + &to_i915(front->obj->base.dev)->display.fb_tracking.lock); } /** @@ -311,6 +311,8 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old, */ BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > BITS_PER_TYPE(atomic_t)); + BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); + BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); if (old) { drm_WARN_ON(old->obj->base.dev, diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h index ff0c37b079aa..3c474ed937fb 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h @@ -25,6 +25,7 @@ #define __INTEL_FRONTBUFFER_H__ #include <linux/atomic.h> +#include <linux/bits.h> #include <linux/kref.h> #include "gem/i915_gem_object_types.h" @@ -48,6 +49,23 @@ struct intel_frontbuffer { struct rcu_head rcu; }; +/* + * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is + * considered to be the frontbuffer for the given plane interface-wise. This + * doesn't mean that the hw necessarily already scans it out, but that any + * rendering (by the cpu or gpu) will land in the frontbuffer eventually. + * + * We have one bit per pipe and per scanout plane type. 
+ */ +#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 +#define INTEL_FRONTBUFFER(pipe, plane_id) \ + BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) +#define INTEL_FRONTBUFFER_OVERLAY(pipe) \ + BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) +#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ + GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \ + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) + void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915, unsigned frontbuffer_bits); void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
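Review note: the stray semicolon on the INTEL_FRONTBUFFER() definition is dropped above -- a trailing semicolon would break any use of the macro inside an expression. The macros pack one byte of tracking bits per pipe; here they are exercised standalone, with userspace stand-ins for the kernel's BIT()/GENMASK() from <linux/bits.h>. For pipe B (index 1), plane 2 lands on bit 10, the overlay claims the pipe's top bit (15), and ALL_MASK covers bits 15..8.

#include <stdio.h>

#define BIT(n) (1u << (n))
#define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
#define INTEL_FRONTBUFFER(pipe, plane_id) \
	BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
	BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
	GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
		INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))

int main(void)
{
	printf("plane bit: 0x%08x\n", INTEL_FRONTBUFFER(1, 2));       /* 0x00000400 */
	printf("overlay:   0x%08x\n", INTEL_FRONTBUFFER_OVERLAY(1));  /* 0x00008000 */
	printf("pipe mask: 0x%08x\n", INTEL_FRONTBUFFER_ALL_MASK(1)); /* 0x0000ff00 */
	return 0;
}

diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index a6ba7fb72339..74443f57f62d 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -37,6 +37,7 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_gmbus.h" +#include "intel_gmbus_regs.h" struct intel_gmbus { struct i2c_adapter adapter; @@ -45,7 +46,7 @@ struct intel_gmbus { u32 reg0; i915_reg_t gpio_reg; struct i2c_algo_bit_data bit_algo; - struct drm_i915_private *dev_priv; + struct drm_i915_private *i915; }; struct gmbus_pin { @@ -116,6 +117,18 @@ static const struct gmbus_pin gmbus_pins_dg2[] = { [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ }, }; +static const struct gmbus_pin gmbus_pins_mtp[] = { + [GMBUS_PIN_1_BXT] = { "dpa", GPIOB }, + [GMBUS_PIN_2_BXT] = { "dpb", GPIOC }, + [GMBUS_PIN_3_BXT] = { "dpc", GPIOD }, + [GMBUS_PIN_4_CNP] = { "dpd", GPIOE }, + [GMBUS_PIN_5_MTP] = { "dpe", GPIOF }, + [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ }, + [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK }, + [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL }, + [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM }, +}; + static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915, unsigned int pin) { @@ -128,6 +141,9 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915, } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) { pins = gmbus_pins_dg1; size = ARRAY_SIZE(gmbus_pins_dg1); + } else if (INTEL_PCH_TYPE(i915) >= PCH_MTP) { + pins = gmbus_pins_mtp; + size = ARRAY_SIZE(gmbus_pins_mtp); } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) { pins = gmbus_pins_icp; size = ARRAY_SIZE(gmbus_pins_icp); @@ -170,55 +186,55 @@ to_intel_gmbus(struct i2c_adapter *i2c) } void -intel_gmbus_reset(struct drm_i915_private *dev_priv) +intel_gmbus_reset(struct drm_i915_private *i915) { - intel_de_write(dev_priv, GMBUS0, 0); - intel_de_write(dev_priv, GMBUS4, 0); + intel_de_write(i915, GMBUS0(i915), 0); + intel_de_write(i915, GMBUS4(i915), 0); } -static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv, +static void pnv_gmbus_clock_gating(struct drm_i915_private *i915, bool enable) { u32 val; /* When using bit bashing for I2C, this bit needs to be set to 1 */ - val = intel_de_read(dev_priv, DSPCLK_GATE_D); + val = intel_de_read(i915, DSPCLK_GATE_D(i915)); if (!enable) val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE; else val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE; - intel_de_write(dev_priv, DSPCLK_GATE_D, val); + intel_de_write(i915, DSPCLK_GATE_D(i915), val); } -static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv, +static void pch_gmbus_clock_gating(struct drm_i915_private *i915, bool enable) { u32 val; - val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); if (!enable) val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE; else val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE; - intel_de_write(dev_priv, 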
SOUTH_DSPCLK_GATE_D, val); + intel_de_write(i915, SOUTH_DSPCLK_GATE_D, val); } -static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv, +static void bxt_gmbus_clock_gating(struct drm_i915_private *i915, bool enable) { u32 val; - val = intel_de_read(dev_priv, GEN9_CLKGATE_DIS_4); + val = intel_de_read(i915, GEN9_CLKGATE_DIS_4); if (!enable) val |= BXT_GMBUS_GATING_DIS; else val &= ~BXT_GMBUS_GATING_DIS; - intel_de_write(dev_priv, GEN9_CLKGATE_DIS_4, val); + intel_de_write(i915, GEN9_CLKGATE_DIS_4, val); } static u32 get_reserved(struct intel_gmbus *bus) { - struct drm_i915_private *i915 = bus->dev_priv; + struct drm_i915_private *i915 = bus->i915; struct intel_uncore *uncore = &i915->uncore; u32 reserved = 0; @@ -234,7 +250,7 @@ static u32 get_reserved(struct intel_gmbus *bus) static int get_clock(void *data) { struct intel_gmbus *bus = data; - struct intel_uncore *uncore = &bus->dev_priv->uncore; + struct intel_uncore *uncore = &bus->i915->uncore; u32 reserved = get_reserved(bus); intel_uncore_write_notrace(uncore, @@ -249,7 +265,7 @@ static int get_clock(void *data) static int get_data(void *data) { struct intel_gmbus *bus = data; - struct intel_uncore *uncore = &bus->dev_priv->uncore; + struct intel_uncore *uncore = &bus->i915->uncore; u32 reserved = get_reserved(bus); intel_uncore_write_notrace(uncore, @@ -264,7 +280,7 @@ static int get_data(void *data) static void set_clock(void *data, int state_high) { struct intel_gmbus *bus = data; - struct intel_uncore *uncore = &bus->dev_priv->uncore; + struct intel_uncore *uncore = &bus->i915->uncore; u32 reserved = get_reserved(bus); u32 clock_bits; @@ -283,7 +299,7 @@ static void set_clock(void *data, int state_high) static void set_data(void *data, int state_high) { struct intel_gmbus *bus = data; - struct intel_uncore *uncore = &bus->dev_priv->uncore; + struct intel_uncore *uncore = &bus->i915->uncore; u32 reserved = get_reserved(bus); u32 data_bits; @@ -301,12 +317,12 @@ static int intel_gpio_pre_xfer(struct i2c_adapter *adapter) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *dev_priv = bus->dev_priv; + struct drm_i915_private *i915 = bus->i915; - intel_gmbus_reset(dev_priv); + intel_gmbus_reset(i915); - if (IS_PINEVIEW(dev_priv)) - pnv_gmbus_clock_gating(dev_priv, false); + if (IS_PINEVIEW(i915)) + pnv_gmbus_clock_gating(i915, false); set_data(bus, 1); set_clock(bus, 1); @@ -318,13 +334,13 @@ static void intel_gpio_post_xfer(struct i2c_adapter *adapter) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *dev_priv = bus->dev_priv; + struct drm_i915_private *i915 = bus->i915; set_data(bus, 1); set_clock(bus, 1); - if (IS_PINEVIEW(dev_priv)) - pnv_gmbus_clock_gating(dev_priv, true); + if (IS_PINEVIEW(i915)) + pnv_gmbus_clock_gating(i915, true); } static void @@ -356,7 +372,7 @@ static bool has_gmbus_irq(struct drm_i915_private *i915) return HAS_GMBUS_IRQ(i915) && intel_irqs_enabled(i915); } -static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en) +static int gmbus_wait(struct drm_i915_private *i915, u32 status, u32 irq_en) { DEFINE_WAIT(wait); u32 gmbus2; @@ -366,21 +382,21 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en) * we also need to check for NAKs besides the hw ready/idle signal, we * need to wake up periodically and check that ourselves. 
*/ - if (!has_gmbus_irq(dev_priv)) + if (!has_gmbus_irq(i915)) irq_en = 0; - add_wait_queue(&dev_priv->gmbus_wait_queue, &wait); - intel_de_write_fw(dev_priv, GMBUS4, irq_en); + add_wait_queue(&i915->display.gmbus.wait_queue, &wait); + intel_de_write_fw(i915, GMBUS4(i915), irq_en); status |= GMBUS_SATOER; - ret = wait_for_us((gmbus2 = intel_de_read_fw(dev_priv, GMBUS2)) & status, + ret = wait_for_us((gmbus2 = intel_de_read_fw(i915, GMBUS2(i915))) & status, 2); if (ret) - ret = wait_for((gmbus2 = intel_de_read_fw(dev_priv, GMBUS2)) & status, + ret = wait_for((gmbus2 = intel_de_read_fw(i915, GMBUS2(i915))) & status, 50); - intel_de_write_fw(dev_priv, GMBUS4, 0); - remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait); + intel_de_write_fw(i915, GMBUS4(i915), 0); + remove_wait_queue(&i915->display.gmbus.wait_queue, &wait); if (gmbus2 & GMBUS_SATOER) return -ENXIO; @@ -389,7 +405,7 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en) } static int -gmbus_wait_idle(struct drm_i915_private *dev_priv) +gmbus_wait_idle(struct drm_i915_private *i915) { DEFINE_WAIT(wait); u32 irq_enable; @@ -397,35 +413,35 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) /* Important: The hw handles only the first bit, so set only one! */ irq_enable = 0; - if (has_gmbus_irq(dev_priv)) + if (has_gmbus_irq(i915)) irq_enable = GMBUS_IDLE_EN; - add_wait_queue(&dev_priv->gmbus_wait_queue, &wait); - intel_de_write_fw(dev_priv, GMBUS4, irq_enable); + add_wait_queue(&i915->display.gmbus.wait_queue, &wait); + intel_de_write_fw(i915, GMBUS4(i915), irq_enable); - ret = intel_wait_for_register_fw(&dev_priv->uncore, - GMBUS2, GMBUS_ACTIVE, 0, + ret = intel_wait_for_register_fw(&i915->uncore, + GMBUS2(i915), GMBUS_ACTIVE, 0, 10); - intel_de_write_fw(dev_priv, GMBUS4, 0); - remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait); + intel_de_write_fw(i915, GMBUS4(i915), 0); + remove_wait_queue(&i915->display.gmbus.wait_queue, &wait); return ret; } -static unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv) +static unsigned int gmbus_max_xfer_size(struct drm_i915_private *i915) { - return DISPLAY_VER(dev_priv) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX : + return DISPLAY_VER(i915) >= 9 ? 
GEN9_GMBUS_BYTE_COUNT_MAX : GMBUS_BYTE_COUNT_MAX; } static int -gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv, +gmbus_xfer_read_chunk(struct drm_i915_private *i915, unsigned short addr, u8 *buf, unsigned int len, u32 gmbus0_reg, u32 gmbus1_index) { unsigned int size = len; - bool burst_read = len > gmbus_max_xfer_size(dev_priv); + bool burst_read = len > gmbus_max_xfer_size(i915); bool extra_byte_added = false; if (burst_read) { @@ -438,21 +454,21 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv, len++; } size = len % 256 + 256; - intel_de_write_fw(dev_priv, GMBUS0, + intel_de_write_fw(i915, GMBUS0(i915), gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE); } - intel_de_write_fw(dev_priv, GMBUS1, + intel_de_write_fw(i915, GMBUS1(i915), gmbus1_index | GMBUS_CYCLE_WAIT | (size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY); while (len) { int ret; u32 val, loop = 0; - ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN); + ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN); if (ret) return ret; - val = intel_de_read_fw(dev_priv, GMBUS3); + val = intel_de_read_fw(i915, GMBUS3(i915)); do { if (extra_byte_added && len == 1) break; @@ -463,7 +479,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv, if (burst_read && len == size - 4) /* Reset the override bit */ - intel_de_write_fw(dev_priv, GMBUS0, gmbus0_reg); + intel_de_write_fw(i915, GMBUS0(i915), gmbus0_reg); } return 0; @@ -480,7 +496,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv, #define INTEL_GMBUS_BURST_READ_MAX_LEN 767U static int -gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, +gmbus_xfer_read(struct drm_i915_private *i915, struct i2c_msg *msg, u32 gmbus0_reg, u32 gmbus1_index) { u8 *buf = msg->buf; @@ -489,12 +505,12 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, int ret; do { - if (HAS_GMBUS_BURST_READ(dev_priv)) + if (HAS_GMBUS_BURST_READ(i915)) len = min(rx_size, INTEL_GMBUS_BURST_READ_MAX_LEN); else - len = min(rx_size, gmbus_max_xfer_size(dev_priv)); + len = min(rx_size, gmbus_max_xfer_size(i915)); - ret = gmbus_xfer_read_chunk(dev_priv, msg->addr, buf, len, + ret = gmbus_xfer_read_chunk(i915, msg->addr, buf, len, gmbus0_reg, gmbus1_index); if (ret) return ret; @@ -507,7 +523,7 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, } static int -gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, +gmbus_xfer_write_chunk(struct drm_i915_private *i915, unsigned short addr, u8 *buf, unsigned int len, u32 gmbus1_index) { @@ -520,8 +536,8 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, len -= 1; } - intel_de_write_fw(dev_priv, GMBUS3, val); - intel_de_write_fw(dev_priv, GMBUS1, + intel_de_write_fw(i915, GMBUS3(i915), val); + intel_de_write_fw(i915, GMBUS1(i915), gmbus1_index | GMBUS_CYCLE_WAIT | (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); while (len) { int ret; @@ -531,9 +547,9 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, val |= *buf++ << (8 * loop); } while (--len && ++loop < 4); - intel_de_write_fw(dev_priv, GMBUS3, val); + intel_de_write_fw(i915, GMBUS3(i915), val); - ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN); + ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN); if (ret) return ret; } @@ -542,7 +558,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, } static int -gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg, 
+gmbus_xfer_write(struct drm_i915_private *i915, struct i2c_msg *msg, u32 gmbus1_index) { u8 *buf = msg->buf; @@ -551,9 +567,9 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg, int ret; do { - len = min(tx_size, gmbus_max_xfer_size(dev_priv)); + len = min(tx_size, gmbus_max_xfer_size(i915)); - ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len, + ret = gmbus_xfer_write_chunk(i915, msg->addr, buf, len, gmbus1_index); if (ret) return ret; @@ -580,7 +596,7 @@ gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num) } static int -gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs, +gmbus_index_xfer(struct drm_i915_private *i915, struct i2c_msg *msgs, u32 gmbus0_reg) { u32 gmbus1_index = 0; @@ -596,17 +612,17 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs, /* GMBUS5 holds 16-bit index */ if (gmbus5) - intel_de_write_fw(dev_priv, GMBUS5, gmbus5); + intel_de_write_fw(i915, GMBUS5(i915), gmbus5); if (msgs[1].flags & I2C_M_RD) - ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus0_reg, + ret = gmbus_xfer_read(i915, &msgs[1], gmbus0_reg, gmbus1_index); else - ret = gmbus_xfer_write(dev_priv, &msgs[1], gmbus1_index); + ret = gmbus_xfer_write(i915, &msgs[1], gmbus1_index); /* Clear GMBUS5 after each index transfer */ if (gmbus5) - intel_de_write_fw(dev_priv, GMBUS5, 0); + intel_de_write_fw(i915, GMBUS5(i915), 0); return ret; }
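Review note: gmbus_index_xfer() above always consumes a pair of messages -- msgs[0] supplies a 1- or 2-byte index (a 16-bit index goes through GMBUS5) and msgs[1] is the actual read or write. The predicate gmbus_is_index_xfer() that gates it is outside this hunk, so the sketch below reconstructs the pairing rule from how the pair is consumed; treat the exact conditions as an assumption.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define I2C_M_RD 0x0001	/* read-direction flag, as in <linux/i2c.h> */

/* minimal stand-in for struct i2c_msg, just the fields the rule needs */
struct example_i2c_msg {
	uint16_t addr;
	uint16_t flags;
	uint16_t len;
	uint8_t *buf;
};

/* msgs[i] is a short write, msgs[i+1] targets the same address */
static bool example_is_index_xfer(const struct example_i2c_msg *msgs,
				  int i, int num)
{
	return i + 1 < num &&
	       msgs[i].addr == msgs[i + 1].addr &&
	       !(msgs[i].flags & I2C_M_RD) &&
	       (msgs[i].len == 1 || msgs[i].len == 2) &&
	       msgs[i + 1].len > 0;
}

int main(void)
{
	uint8_t index = 0x54, data[4];
	struct example_i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &index },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 4, .buf = data },
	};

	printf("index transfer: %s\n",
	       example_is_index_xfer(msgs, 0, 2) ? "yes" : "no");
	return 0;
}

@@ -616,34 +632,34 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num, u32 gmbus0_source) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *dev_priv = bus->dev_priv; + struct drm_i915_private *i915 = bus->i915; int i = 0, inc, try = 0; int ret = 0; /* Display WA #0868: skl,bxt,kbl,cfl,glk */ - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - bxt_gmbus_clock_gating(dev_priv, false); - else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_CNP(dev_priv)) - pch_gmbus_clock_gating(dev_priv, false); + if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) + bxt_gmbus_clock_gating(i915, false); + else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915)) + pch_gmbus_clock_gating(i915, false); retry: - intel_de_write_fw(dev_priv, GMBUS0, gmbus0_source | bus->reg0); + intel_de_write_fw(i915, GMBUS0(i915), gmbus0_source | bus->reg0); for (; i < num; i += inc) { inc = 1; if (gmbus_is_index_xfer(msgs, i, num)) { - ret = gmbus_index_xfer(dev_priv, &msgs[i], + ret = gmbus_index_xfer(i915, &msgs[i], gmbus0_source | bus->reg0); inc = 2; /* an index transmission is two msgs */ } else if (msgs[i].flags & I2C_M_RD) { - ret = gmbus_xfer_read(dev_priv, &msgs[i], + ret = gmbus_xfer_read(i915, &msgs[i], gmbus0_source | bus->reg0, 0); } else { - ret = gmbus_xfer_write(dev_priv, &msgs[i], 0); } if (!ret) - ret = gmbus_wait(dev_priv, + ret = gmbus_wait(i915, GMBUS_HW_WAIT_PHASE, GMBUS_HW_WAIT_EN); if (ret == -ETIMEDOUT) goto timeout; @@ -655,19 +671,19 @@ retry: * a STOP on the very first cycle. To simplify the code we * unconditionally generate the STOP condition with an additional gmbus * cycle. */ - intel_de_write_fw(dev_priv, GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY); + intel_de_write_fw(i915, GMBUS1(i915), GMBUS_CYCLE_STOP | GMBUS_SW_RDY); /* Mark the GMBUS interface as disabled after waiting for idle. * We will re-enable it at the start of the next xfer, * till then let it sleep. 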
*/ - if (gmbus_wait_idle(dev_priv)) { - drm_dbg_kms(&dev_priv->drm, + if (gmbus_wait_idle(i915)) { + drm_dbg_kms(&i915->drm, "GMBUS [%s] timed out waiting for idle\n", adapter->name); ret = -ETIMEDOUT; } - intel_de_write_fw(dev_priv, GMBUS0, 0); + intel_de_write_fw(i915, GMBUS0(i915), 0); ret = ret ?: i; goto out; @@ -686,8 +702,8 @@ clear_err: * it's slow responding and only answers on the 2nd retry. */ ret = -ENXIO; - if (gmbus_wait_idle(dev_priv)) { - drm_dbg_kms(&dev_priv->drm, + if (gmbus_wait_idle(i915)) { + drm_dbg_kms(&i915->drm, "GMBUS [%s] timed out after NAK\n", adapter->name); ret = -ETIMEDOUT; @@ -697,11 +713,11 @@ clear_err: * of resetting the GMBUS controller and so clearing the * BUS_ERROR raised by the slave's NAK. */ - intel_de_write_fw(dev_priv, GMBUS1, GMBUS_SW_CLR_INT); - intel_de_write_fw(dev_priv, GMBUS1, 0); - intel_de_write_fw(dev_priv, GMBUS0, 0); + intel_de_write_fw(i915, GMBUS1(i915), GMBUS_SW_CLR_INT); + intel_de_write_fw(i915, GMBUS1(i915), 0); + intel_de_write_fw(i915, GMBUS0(i915), 0); - drm_dbg_kms(&dev_priv->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n", + drm_dbg_kms(&i915->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n", adapter->name, msgs[i].addr, (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len); @@ -712,7 +728,7 @@ clear_err: * drm_do_probe_ddc_edid, which bails out on the first -ENXIO. */ if (ret == -ENXIO && i == 0 && try++ == 0) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "GMBUS [%s] NAK on first message, retry\n", adapter->name); goto retry; @@ -721,10 +737,10 @@ clear_err: goto out; timeout: - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "GMBUS [%s] timed out, falling back to bit banging on pin %d\n", bus->adapter.name, bus->reg0 & 0xff); - intel_de_write_fw(dev_priv, GMBUS0, 0); + intel_de_write_fw(i915, GMBUS0(i915), 0); /* * Hardware may not support GMBUS over these pins? 
Try GPIO bitbanging @@ -734,10 +750,10 @@ timeout: out: /* Display WA #0868: skl,bxt,kbl,cfl,glk */ - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - bxt_gmbus_clock_gating(dev_priv, true); - else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_CNP(dev_priv)) - pch_gmbus_clock_gating(dev_priv, true); + if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) + bxt_gmbus_clock_gating(i915, true); + else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915)) + pch_gmbus_clock_gating(i915, true); return ret; } @@ -746,11 +762,11 @@ static int gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *dev_priv = bus->dev_priv; + struct drm_i915_private *i915 = bus->i915; intel_wakeref_t wakeref; int ret; - wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); + wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS); if (bus->force_bit) { ret = i2c_bit_algo.master_xfer(adapter, msgs, num); @@ -762,7 +778,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) bus->force_bit |= GMBUS_FORCE_BIT_RETRY; } - intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref); + intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref); return ret; } @@ -770,7 +786,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) int intel_gmbus_output_aksv(struct i2c_adapter *adapter) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *dev_priv = bus->dev_priv; + struct drm_i915_private *i915 = bus->i915; u8 cmd = DRM_HDCP_DDC_AKSV; u8 buf[DRM_HDCP_KSV_LEN] = { 0 }; struct i2c_msg msgs[] = { @@ -790,8 +806,8 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter) intel_wakeref_t wakeref; int ret; - wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); - mutex_lock(&dev_priv->gmbus_mutex); + wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS); + mutex_lock(&i915->display.gmbus.mutex); /* * In order to output Aksv to the receiver, use an indexed write to @@ -800,8 +816,8 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter) */ ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT); - mutex_unlock(&dev_priv->gmbus_mutex); - intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref); + mutex_unlock(&i915->display.gmbus.mutex); + intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref); return ret; } @@ -824,27 +840,27 @@ static void gmbus_lock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *dev_priv = bus->dev_priv; + struct drm_i915_private *i915 = bus->i915; - mutex_lock(&dev_priv->gmbus_mutex); + mutex_lock(&i915->display.gmbus.mutex); } static int gmbus_trylock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *dev_priv = bus->dev_priv; + struct drm_i915_private *i915 = bus->i915; - return mutex_trylock(&dev_priv->gmbus_mutex); + return mutex_trylock(&i915->display.gmbus.mutex); } static void gmbus_unlock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *dev_priv = bus->dev_priv; + struct drm_i915_private *i915 = bus->i915; - mutex_unlock(&dev_priv->gmbus_mutex); + mutex_unlock(&i915->display.gmbus.mutex); } static const struct i2c_lock_operations gmbus_lock_ops = { @@ -855,31 +871,31 @@ static const struct i2c_lock_operations gmbus_lock_ops = { /** * intel_gmbus_setup - 
instantiate all Intel i2c GMBuses - * @dev_priv: i915 device private + * @i915: i915 device private */ -int intel_gmbus_setup(struct drm_i915_private *dev_priv) +int intel_gmbus_setup(struct drm_i915_private *i915) { - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); unsigned int pin; int ret; - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE; - else if (!HAS_GMCH(dev_priv)) + if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + i915->display.gmbus.mmio_base = VLV_DISPLAY_BASE; + else if (!HAS_GMCH(i915)) /* * Broxton uses the same PCH offsets for South Display Engine, * even though it doesn't have a PCH. */ - dev_priv->gpio_mmio_base = PCH_DISPLAY_BASE; + i915->display.gmbus.mmio_base = PCH_DISPLAY_BASE; - mutex_init(&dev_priv->gmbus_mutex); - init_waitqueue_head(&dev_priv->gmbus_wait_queue); + mutex_init(&i915->display.gmbus.mutex); + init_waitqueue_head(&i915->display.gmbus.wait_queue); - for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) { + for (pin = 0; pin < ARRAY_SIZE(i915->display.gmbus.bus); pin++) { const struct gmbus_pin *gmbus_pin; struct intel_gmbus *bus; - gmbus_pin = get_gmbus_pin(dev_priv, pin); + gmbus_pin = get_gmbus_pin(i915, pin); if (!gmbus_pin) continue; @@ -896,7 +912,7 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv) "i915 gmbus %s", gmbus_pin->name); bus->adapter.dev.parent = &pdev->dev; - bus->dev_priv = dev_priv; + bus->i915 = i915; bus->adapter.algo = &gmbus_algorithm; bus->adapter.lock_ops = &gmbus_lock_ops; @@ -911,10 +927,10 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv) bus->reg0 = pin | GMBUS_RATE_100KHZ; /* gmbus seems to be broken on i830 */ - if (IS_I830(dev_priv)) + if (IS_I830(i915)) bus->force_bit = 1; - intel_gpio_setup(bus, GPIO(gmbus_pin->gpio)); + intel_gpio_setup(bus, GPIO(i915, gmbus_pin->gpio)); ret = i2c_add_adapter(&bus->adapter); if (ret) { @@ -922,43 +938,43 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv) goto err; } - dev_priv->gmbus[pin] = bus; + i915->display.gmbus.bus[pin] = bus; } - intel_gmbus_reset(dev_priv); + intel_gmbus_reset(i915); return 0; err: - intel_gmbus_teardown(dev_priv); + intel_gmbus_teardown(i915); return ret; } -struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, +struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *i915, unsigned int pin) { - if (drm_WARN_ON(&dev_priv->drm, pin >= ARRAY_SIZE(dev_priv->gmbus) || - !dev_priv->gmbus[pin])) + if (drm_WARN_ON(&i915->drm, pin >= ARRAY_SIZE(i915->display.gmbus.bus) || + !i915->display.gmbus.bus[pin])) return NULL; - return &dev_priv->gmbus[pin]->adapter; + return &i915->display.gmbus.bus[pin]->adapter; } void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *dev_priv = bus->dev_priv; + struct drm_i915_private *i915 = bus->i915; - mutex_lock(&dev_priv->gmbus_mutex); + mutex_lock(&i915->display.gmbus.mutex); bus->force_bit += force_bit ? 1 : -1; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "%sabling bit-banging on %s. force bit now %d\n", force_bit ? 
"en" : "dis", adapter->name, bus->force_bit); - mutex_unlock(&dev_priv->gmbus_mutex); + mutex_unlock(&i915->display.gmbus.mutex); } bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) @@ -968,20 +984,20 @@ bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) return bus->force_bit; } -void intel_gmbus_teardown(struct drm_i915_private *dev_priv) +void intel_gmbus_teardown(struct drm_i915_private *i915) { unsigned int pin; - for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) { + for (pin = 0; pin < ARRAY_SIZE(i915->display.gmbus.bus); pin++) { struct intel_gmbus *bus; - bus = dev_priv->gmbus[pin]; + bus = i915->display.gmbus.bus[pin]; if (!bus) continue; i2c_del_adapter(&bus->adapter); kfree(bus); - dev_priv->gmbus[pin] = NULL; + i915->display.gmbus.bus[pin] = NULL; } } diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.h b/drivers/gpu/drm/i915/display/intel_gmbus.h index 8edc2e99cf53..20f704bd4e70 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.h +++ b/drivers/gpu/drm/i915/display/intel_gmbus.h @@ -24,6 +24,7 @@ struct i2c_adapter; #define GMBUS_PIN_2_BXT 2 #define GMBUS_PIN_3_BXT 3 #define GMBUS_PIN_4_CNP 4 +#define GMBUS_PIN_5_MTP 5 #define GMBUS_PIN_9_TC1_ICP 9 #define GMBUS_PIN_10_TC2_ICP 10 #define GMBUS_PIN_11_TC3_ICP 11 diff --git a/drivers/gpu/drm/i915/display/intel_gmbus_regs.h b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h new file mode 100644 index 000000000000..53aacbda983c --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_GMBUS_REGS_H__ +#define __INTEL_GMBUS_REGS_H__ + +#include "i915_reg_defs.h" + +#define GMBUS_MMIO_BASE(__i915) ((__i915)->display.gmbus.mmio_base) + +#define GPIO(__i915, gpio) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5010 + 4 * (gpio)) +#define GPIO_CLOCK_DIR_MASK (1 << 0) +#define GPIO_CLOCK_DIR_IN (0 << 1) +#define GPIO_CLOCK_DIR_OUT (1 << 1) +#define GPIO_CLOCK_VAL_MASK (1 << 2) +#define GPIO_CLOCK_VAL_OUT (1 << 3) +#define GPIO_CLOCK_VAL_IN (1 << 4) +#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5) +#define GPIO_DATA_DIR_MASK (1 << 8) +#define GPIO_DATA_DIR_IN (0 << 9) +#define GPIO_DATA_DIR_OUT (1 << 9) +#define GPIO_DATA_VAL_MASK (1 << 10) +#define GPIO_DATA_VAL_OUT (1 << 11) +#define GPIO_DATA_VAL_IN (1 << 12) +#define GPIO_DATA_PULLUP_DISABLE (1 << 13) + +/* clock/port select */ +#define GMBUS0(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5100) +#define GMBUS_AKSV_SELECT (1 << 11) +#define GMBUS_RATE_100KHZ (0 << 8) +#define GMBUS_RATE_50KHZ (1 << 8) +#define GMBUS_RATE_400KHZ (2 << 8) /* reserved on Pineview */ +#define GMBUS_RATE_1MHZ (3 << 8) /* reserved on Pineview */ +#define GMBUS_HOLD_EXT (1 << 7) /* 300ns hold time, rsvd on Pineview */ +#define GMBUS_BYTE_CNT_OVERRIDE (1 << 6) + +/* command/status */ +#define GMBUS1(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5104) +#define GMBUS_SW_CLR_INT (1 << 31) +#define GMBUS_SW_RDY (1 << 30) +#define GMBUS_ENT (1 << 29) /* enable timeout */ +#define GMBUS_CYCLE_NONE (0 << 25) +#define GMBUS_CYCLE_WAIT (1 << 25) +#define GMBUS_CYCLE_INDEX (2 << 25) +#define GMBUS_CYCLE_STOP (4 << 25) +#define GMBUS_BYTE_COUNT_SHIFT 16 +#define GMBUS_BYTE_COUNT_MAX 256U +#define GEN9_GMBUS_BYTE_COUNT_MAX 511U +#define GMBUS_SLAVE_INDEX_SHIFT 8 +#define GMBUS_SLAVE_ADDR_SHIFT 1 +#define GMBUS_SLAVE_READ (1 << 0) +#define GMBUS_SLAVE_WRITE (0 << 0) + +/* status */ +#define GMBUS2(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5108) +#define GMBUS_INUSE (1 << 15) +#define 
GMBUS_HW_WAIT_PHASE (1 << 14) +#define GMBUS_STALL_TIMEOUT (1 << 13) +#define GMBUS_INT (1 << 12) +#define GMBUS_HW_RDY (1 << 11) +#define GMBUS_SATOER (1 << 10) +#define GMBUS_ACTIVE (1 << 9) + +/* data buffer bytes 3-0 */ +#define GMBUS3(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x510c) + +/* interrupt mask (Pineview+) */ +#define GMBUS4(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5110) +#define GMBUS_SLAVE_TIMEOUT_EN (1 << 4) +#define GMBUS_NAK_EN (1 << 3) +#define GMBUS_IDLE_EN (1 << 2) +#define GMBUS_HW_WAIT_EN (1 << 1) +#define GMBUS_HW_RDY_EN (1 << 0) + +/* byte index */ +#define GMBUS5(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5120) +#define GMBUS_2BYTE_INDEX_EN (1 << 31) + +#endif /* __INTEL_GMBUS_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 7dbc9f0bb24f..6406fd487ee5 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -23,6 +23,7 @@ #include "intel_display_power_well.h" #include "intel_display_types.h" #include "intel_hdcp.h" +#include "intel_hdcp_regs.h" #include "intel_pcode.h" #define KEY_LOAD_TRIES 5 @@ -209,12 +210,12 @@ bool intel_hdcp2_capable(struct intel_connector *connector) return false; /* MEI interface is solid */ - mutex_lock(&dev_priv->hdcp_comp_mutex); - if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + if (!dev_priv->display.hdcp.comp_added || !dev_priv->display.hdcp.master) { + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return false; } - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); /* Sink's capability for HDCP2.2 */ hdcp->shim->hdcp_2_2_capable(dig_port, &capable); @@ -1131,8 +1132,8 @@ static void intel_hdcp_prop_work(struct work_struct *work) bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port) { - return INTEL_INFO(dev_priv)->display.has_hdcp && - (DISPLAY_VER(dev_priv) >= 12 || port < PORT_E); + return RUNTIME_INFO(dev_priv)->has_hdcp && + (DISPLAY_VER(dev_priv) >= 12 || port < PORT_E); } static int @@ -1145,11 +1146,11 @@ hdcp2_prepare_ake_init(struct intel_connector *connector, struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } @@ -1157,7 +1158,7 @@ hdcp2_prepare_ake_init(struct intel_connector *connector, if (ret) drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n", ret); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -1175,11 +1176,11 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } @@ -1189,7 +1190,7 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. 
%d\n", ret); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -1203,18 +1204,18 @@ static int hdcp2_verify_hprime(struct intel_connector *connector, struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime); if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -1229,11 +1230,11 @@ hdcp2_store_pairing_info(struct intel_connector *connector, struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } @@ -1241,7 +1242,7 @@ hdcp2_store_pairing_info(struct intel_connector *connector, if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n", ret); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -1256,11 +1257,11 @@ hdcp2_prepare_lc_init(struct intel_connector *connector, struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } @@ -1268,7 +1269,7 @@ hdcp2_prepare_lc_init(struct intel_connector *connector, if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n", ret); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -1283,11 +1284,11 @@ hdcp2_verify_lprime(struct intel_connector *connector, struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } @@ -1295,7 +1296,7 @@ hdcp2_verify_lprime(struct intel_connector *connector, if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n", ret); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -1309,11 +1310,11 @@ static int hdcp2_prepare_skey(struct intel_connector *connector, struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } @@ -1321,7 +1322,7 @@ static int hdcp2_prepare_skey(struct intel_connector *connector, if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Get session key failed. 
%d\n", ret); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -1338,11 +1339,11 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector, struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } @@ -1352,7 +1353,7 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector, if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Verify rep topology failed. %d\n", ret); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -1367,18 +1368,18 @@ hdcp2_verify_mprime(struct intel_connector *connector, struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready); if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -1391,11 +1392,11 @@ static int hdcp2_authenticate_port(struct intel_connector *connector) struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } @@ -1403,7 +1404,7 @@ static int hdcp2_authenticate_port(struct intel_connector *connector) if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. 
%d\n", ret); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -1415,17 +1416,17 @@ static int hdcp2_close_mei_session(struct intel_connector *connector) struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } ret = comp->ops->close_hdcp_session(comp->mei_dev, &dig_port->hdcp_port_data); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -2143,10 +2144,10 @@ static int i915_hdcp_component_bind(struct device *i915_kdev, struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev); drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n"); - mutex_lock(&dev_priv->hdcp_comp_mutex); - dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data; - dev_priv->hdcp_master->mei_dev = mei_kdev; - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + dev_priv->display.hdcp.master = (struct i915_hdcp_comp_master *)data; + dev_priv->display.hdcp.master->mei_dev = mei_kdev; + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return 0; } @@ -2157,9 +2158,9 @@ static void i915_hdcp_component_unbind(struct device *i915_kdev, struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev); drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n"); - mutex_lock(&dev_priv->hdcp_comp_mutex); - dev_priv->hdcp_master = NULL; - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + dev_priv->display.hdcp.master = NULL; + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); } static const struct component_ops i915_hdcp_component_ops = { @@ -2250,19 +2251,19 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv) if (!is_hdcp2_supported(dev_priv)) return; - mutex_lock(&dev_priv->hdcp_comp_mutex); - drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added); + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + drm_WARN_ON(&dev_priv->drm, dev_priv->display.hdcp.comp_added); - dev_priv->hdcp_comp_added = true; - mutex_unlock(&dev_priv->hdcp_comp_mutex); + dev_priv->display.hdcp.comp_added = true; + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops, I915_COMPONENT_HDCP); if (ret < 0) { drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n", ret); - mutex_lock(&dev_priv->hdcp_comp_mutex); - dev_priv->hdcp_comp_added = false; - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + dev_priv->display.hdcp.comp_added = false; + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return; } } @@ -2475,14 +2476,14 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, void intel_hdcp_component_fini(struct drm_i915_private *dev_priv) { - mutex_lock(&dev_priv->hdcp_comp_mutex); - if (!dev_priv->hdcp_comp_added) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + if (!dev_priv->display.hdcp.comp_added) { + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return; } - dev_priv->hdcp_comp_added = false; - mutex_unlock(&dev_priv->hdcp_comp_mutex); + dev_priv->display.hdcp.comp_added = false; + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); component_del(dev_priv->drm.dev, 
&i915_hdcp_component_ops); } diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h new file mode 100644 index 000000000000..2a3733e8966c --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h @@ -0,0 +1,270 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_HDCP_REGS_H__ +#define __INTEL_HDCP_REGS_H__ + +#include "i915_reg_defs.h" + +/* HDCP Key Registers */ +#define HDCP_KEY_CONF _MMIO(0x66c00) +#define HDCP_AKSV_SEND_TRIGGER REG_BIT(31) +#define HDCP_CLEAR_KEYS_TRIGGER REG_BIT(30) +#define HDCP_KEY_LOAD_TRIGGER REG_BIT(8) +#define HDCP_KEY_STATUS _MMIO(0x66c04) +#define HDCP_FUSE_IN_PROGRESS REG_BIT(7) +#define HDCP_FUSE_ERROR REG_BIT(6) +#define HDCP_FUSE_DONE REG_BIT(5) +#define HDCP_KEY_LOAD_STATUS REG_BIT(1) +#define HDCP_KEY_LOAD_DONE REG_BIT(0) +#define HDCP_AKSV_LO _MMIO(0x66c10) +#define HDCP_AKSV_HI _MMIO(0x66c14) + +/* HDCP Repeater Registers */ +#define HDCP_REP_CTL _MMIO(0x66d00) +#define HDCP_TRANSA_REP_PRESENT REG_BIT(31) +#define HDCP_TRANSB_REP_PRESENT REG_BIT(30) +#define HDCP_TRANSC_REP_PRESENT REG_BIT(29) +#define HDCP_TRANSD_REP_PRESENT REG_BIT(28) +#define HDCP_DDIB_REP_PRESENT REG_BIT(30) +#define HDCP_DDIA_REP_PRESENT REG_BIT(29) +#define HDCP_DDIC_REP_PRESENT REG_BIT(28) +#define HDCP_DDID_REP_PRESENT REG_BIT(27) +#define HDCP_DDIF_REP_PRESENT REG_BIT(26) +#define HDCP_DDIE_REP_PRESENT REG_BIT(25) +#define HDCP_TRANSA_SHA1_M0 (1 << 20) +#define HDCP_TRANSB_SHA1_M0 (2 << 20) +#define HDCP_TRANSC_SHA1_M0 (3 << 20) +#define HDCP_TRANSD_SHA1_M0 (4 << 20) +#define HDCP_DDIB_SHA1_M0 (1 << 20) +#define HDCP_DDIA_SHA1_M0 (2 << 20) +#define HDCP_DDIC_SHA1_M0 (3 << 20) +#define HDCP_DDID_SHA1_M0 (4 << 20) +#define HDCP_DDIF_SHA1_M0 (5 << 20) +#define HDCP_DDIE_SHA1_M0 (6 << 20) /* Bspec says 5? */ +#define HDCP_SHA1_BUSY REG_BIT(16) +#define HDCP_SHA1_READY REG_BIT(17) +#define HDCP_SHA1_COMPLETE REG_BIT(18) +#define HDCP_SHA1_V_MATCH REG_BIT(19) +#define HDCP_SHA1_TEXT_32 (1 << 1) +#define HDCP_SHA1_COMPLETE_HASH (2 << 1) +#define HDCP_SHA1_TEXT_24 (4 << 1) +#define HDCP_SHA1_TEXT_16 (5 << 1) +#define HDCP_SHA1_TEXT_8 (6 << 1) +#define HDCP_SHA1_TEXT_0 (7 << 1) +#define HDCP_SHA_V_PRIME_H0 _MMIO(0x66d04) +#define HDCP_SHA_V_PRIME_H1 _MMIO(0x66d08) +#define HDCP_SHA_V_PRIME_H2 _MMIO(0x66d0C) +#define HDCP_SHA_V_PRIME_H3 _MMIO(0x66d10) +#define HDCP_SHA_V_PRIME_H4 _MMIO(0x66d14) +#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + (h) * 4)) +#define HDCP_SHA_TEXT _MMIO(0x66d18) + +/* HDCP Auth Registers */ +#define _PORTA_HDCP_AUTHENC 0x66800 +#define _PORTB_HDCP_AUTHENC 0x66500 +#define _PORTC_HDCP_AUTHENC 0x66600 +#define _PORTD_HDCP_AUTHENC 0x66700 +#define _PORTE_HDCP_AUTHENC 0x66A00 +#define _PORTF_HDCP_AUTHENC 0x66900 +#define _PORT_HDCP_AUTHENC(port, x) _MMIO(_PICK(port, \ + _PORTA_HDCP_AUTHENC, \ + _PORTB_HDCP_AUTHENC, \ + _PORTC_HDCP_AUTHENC, \ + _PORTD_HDCP_AUTHENC, \ + _PORTE_HDCP_AUTHENC, \ + _PORTF_HDCP_AUTHENC) + (x)) +#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0) +#define _TRANSA_HDCP_CONF 0x66400 +#define _TRANSB_HDCP_CONF 0x66500 +#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \ + _TRANSB_HDCP_CONF) +#define HDCP_CONF(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? 
\ + TRANS_HDCP_CONF(trans) : \ + PORT_HDCP_CONF(port)) + +#define HDCP_CONF_CAPTURE_AN REG_BIT(0) +#define HDCP_CONF_AUTH_AND_ENC (REG_BIT(1) | REG_BIT(0)) +#define PORT_HDCP_ANINIT(port) _PORT_HDCP_AUTHENC(port, 0x4) +#define _TRANSA_HDCP_ANINIT 0x66404 +#define _TRANSB_HDCP_ANINIT 0x66504 +#define TRANS_HDCP_ANINIT(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP_ANINIT, \ + _TRANSB_HDCP_ANINIT) +#define HDCP_ANINIT(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? \ + TRANS_HDCP_ANINIT(trans) : \ + PORT_HDCP_ANINIT(port)) + +#define PORT_HDCP_ANLO(port) _PORT_HDCP_AUTHENC(port, 0x8) +#define _TRANSA_HDCP_ANLO 0x66408 +#define _TRANSB_HDCP_ANLO 0x66508 +#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \ + _TRANSB_HDCP_ANLO) +#define HDCP_ANLO(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? \ + TRANS_HDCP_ANLO(trans) : \ + PORT_HDCP_ANLO(port)) + +#define PORT_HDCP_ANHI(port) _PORT_HDCP_AUTHENC(port, 0xC) +#define _TRANSA_HDCP_ANHI 0x6640C +#define _TRANSB_HDCP_ANHI 0x6650C +#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \ + _TRANSB_HDCP_ANHI) +#define HDCP_ANHI(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? \ + TRANS_HDCP_ANHI(trans) : \ + PORT_HDCP_ANHI(port)) + +#define PORT_HDCP_BKSVLO(port) _PORT_HDCP_AUTHENC(port, 0x10) +#define _TRANSA_HDCP_BKSVLO 0x66410 +#define _TRANSB_HDCP_BKSVLO 0x66510 +#define TRANS_HDCP_BKSVLO(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP_BKSVLO, \ + _TRANSB_HDCP_BKSVLO) +#define HDCP_BKSVLO(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? \ + TRANS_HDCP_BKSVLO(trans) : \ + PORT_HDCP_BKSVLO(port)) + +#define PORT_HDCP_BKSVHI(port) _PORT_HDCP_AUTHENC(port, 0x14) +#define _TRANSA_HDCP_BKSVHI 0x66414 +#define _TRANSB_HDCP_BKSVHI 0x66514 +#define TRANS_HDCP_BKSVHI(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP_BKSVHI, \ + _TRANSB_HDCP_BKSVHI) +#define HDCP_BKSVHI(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? \ + TRANS_HDCP_BKSVHI(trans) : \ + PORT_HDCP_BKSVHI(port)) + +#define PORT_HDCP_RPRIME(port) _PORT_HDCP_AUTHENC(port, 0x18) +#define _TRANSA_HDCP_RPRIME 0x66418 +#define _TRANSB_HDCP_RPRIME 0x66518 +#define TRANS_HDCP_RPRIME(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP_RPRIME, \ + _TRANSB_HDCP_RPRIME) +#define HDCP_RPRIME(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? \ + TRANS_HDCP_RPRIME(trans) : \ + PORT_HDCP_RPRIME(port)) + +#define PORT_HDCP_STATUS(port) _PORT_HDCP_AUTHENC(port, 0x1C) +#define _TRANSA_HDCP_STATUS 0x6641C +#define _TRANSB_HDCP_STATUS 0x6651C +#define TRANS_HDCP_STATUS(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP_STATUS, \ + _TRANSB_HDCP_STATUS) +#define HDCP_STATUS(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? 
\ + TRANS_HDCP_STATUS(trans) : \ + PORT_HDCP_STATUS(port)) + +#define HDCP_STATUS_STREAM_A_ENC REG_BIT(31) +#define HDCP_STATUS_STREAM_B_ENC REG_BIT(30) +#define HDCP_STATUS_STREAM_C_ENC REG_BIT(29) +#define HDCP_STATUS_STREAM_D_ENC REG_BIT(28) +#define HDCP_STATUS_AUTH REG_BIT(21) +#define HDCP_STATUS_ENC REG_BIT(20) +#define HDCP_STATUS_RI_MATCH REG_BIT(19) +#define HDCP_STATUS_R0_READY REG_BIT(18) +#define HDCP_STATUS_AN_READY REG_BIT(17) +#define HDCP_STATUS_CIPHER REG_BIT(16) +#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff) + +/* HDCP2.2 Registers */ +#define _PORTA_HDCP2_BASE 0x66800 +#define _PORTB_HDCP2_BASE 0x66500 +#define _PORTC_HDCP2_BASE 0x66600 +#define _PORTD_HDCP2_BASE 0x66700 +#define _PORTE_HDCP2_BASE 0x66A00 +#define _PORTF_HDCP2_BASE 0x66900 +#define _PORT_HDCP2_BASE(port, x) _MMIO(_PICK((port), \ + _PORTA_HDCP2_BASE, \ + _PORTB_HDCP2_BASE, \ + _PORTC_HDCP2_BASE, \ + _PORTD_HDCP2_BASE, \ + _PORTE_HDCP2_BASE, \ + _PORTF_HDCP2_BASE) + (x)) + +#define PORT_HDCP2_AUTH(port) _PORT_HDCP2_BASE(port, 0x98) +#define _TRANSA_HDCP2_AUTH 0x66498 +#define _TRANSB_HDCP2_AUTH 0x66598 +#define TRANS_HDCP2_AUTH(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_AUTH, \ + _TRANSB_HDCP2_AUTH) +#define AUTH_LINK_AUTHENTICATED REG_BIT(31) +#define AUTH_LINK_TYPE REG_BIT(30) +#define AUTH_FORCE_CLR_INPUTCTR REG_BIT(19) +#define AUTH_CLR_KEYS REG_BIT(18) +#define HDCP2_AUTH(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? \ + TRANS_HDCP2_AUTH(trans) : \ + PORT_HDCP2_AUTH(port)) + +#define PORT_HDCP2_CTL(port) _PORT_HDCP2_BASE(port, 0xB0) +#define _TRANSA_HDCP2_CTL 0x664B0 +#define _TRANSB_HDCP2_CTL 0x665B0 +#define TRANS_HDCP2_CTL(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_CTL, \ + _TRANSB_HDCP2_CTL) +#define CTL_LINK_ENCRYPTION_REQ REG_BIT(31) +#define HDCP2_CTL(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? \ + TRANS_HDCP2_CTL(trans) : \ + PORT_HDCP2_CTL(port)) + +#define PORT_HDCP2_STATUS(port) _PORT_HDCP2_BASE(port, 0xB4) +#define _TRANSA_HDCP2_STATUS 0x664B4 +#define _TRANSB_HDCP2_STATUS 0x665B4 +#define TRANS_HDCP2_STATUS(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP2_STATUS, \ + _TRANSB_HDCP2_STATUS) +#define LINK_TYPE_STATUS REG_BIT(22) +#define LINK_AUTH_STATUS REG_BIT(21) +#define LINK_ENCRYPTION_STATUS REG_BIT(20) +#define HDCP2_STATUS(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? \ + TRANS_HDCP2_STATUS(trans) : \ + PORT_HDCP2_STATUS(port)) + +#define _PIPEA_HDCP2_STREAM_STATUS 0x668C0 +#define _PIPEB_HDCP2_STREAM_STATUS 0x665C0 +#define _PIPEC_HDCP2_STREAM_STATUS 0x666C0 +#define _PIPED_HDCP2_STREAM_STATUS 0x667C0 +#define PIPE_HDCP2_STREAM_STATUS(pipe) _MMIO(_PICK((pipe), \ + _PIPEA_HDCP2_STREAM_STATUS, \ + _PIPEB_HDCP2_STREAM_STATUS, \ + _PIPEC_HDCP2_STREAM_STATUS, \ + _PIPED_HDCP2_STREAM_STATUS)) + +#define _TRANSA_HDCP2_STREAM_STATUS 0x664C0 +#define _TRANSB_HDCP2_STREAM_STATUS 0x665C0 +#define TRANS_HDCP2_STREAM_STATUS(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP2_STREAM_STATUS, \ + _TRANSB_HDCP2_STREAM_STATUS) +#define STREAM_ENCRYPTION_STATUS REG_BIT(31) +#define STREAM_TYPE_STATUS REG_BIT(30) +#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? 
\ + TRANS_HDCP2_STREAM_STATUS(trans) : \ + PIPE_HDCP2_STREAM_STATUS(port)) + +#define _PORTA_HDCP2_AUTH_STREAM 0x66F00 +#define _PORTB_HDCP2_AUTH_STREAM 0x66F04 +#define PORT_HDCP2_AUTH_STREAM(port) _MMIO_PORT(port, \ + _PORTA_HDCP2_AUTH_STREAM, \ + _PORTB_HDCP2_AUTH_STREAM) +#define _TRANSA_HDCP2_AUTH_STREAM 0x66F00 +#define _TRANSB_HDCP2_AUTH_STREAM 0x66F04 +#define TRANS_HDCP2_AUTH_STREAM(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP2_AUTH_STREAM, \ + _TRANSB_HDCP2_AUTH_STREAM) +#define AUTH_STREAM_TYPE REG_BIT(31) +#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? \ + TRANS_HDCP2_AUTH_STREAM(trans) : \ + PORT_HDCP2_AUTH_STREAM(port)) + +#endif /* __INTEL_HDCP_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index ebd91aa69dd2..7816b2a33fee 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -50,6 +50,7 @@ #include "intel_dp.h" #include "intel_gmbus.h" #include "intel_hdcp.h" +#include "intel_hdcp_regs.h" #include "intel_hdmi.h" #include "intel_lspcon.h" #include "intel_panel.h" @@ -1891,7 +1892,7 @@ int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output) * 1.5x for 12bpc * 1.25x for 10bpc */ - return clock * bpc / 8; + return DIV_ROUND_CLOSEST(clock * bpc, 8); } static bool intel_hdmi_source_bpc_possible(struct drm_i915_private *i915, int bpc) @@ -2001,6 +2002,15 @@ intel_hdmi_mode_valid(struct drm_connector *connector, clock *= 2; } + /* + * HDMI2.1 requires higher resolution modes like 8K60, 4K120 to be + * enumerated only if FRL is supported. Current platforms do not support + * FRL, so prune the higher resolution modes that require a dotclock of more + * than 600MHz. + */ + if (clock > 600000) + return MODE_CLOCK_HIGH; + ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, mode); status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, ycbcr_420_only); diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 5f8b4f481cff..f7a2f485b177 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -119,13 +119,13 @@ intel_connector_hpd_pin(struct intel_connector *connector) * responsible for further action. * * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is - * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to + * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and * short IRQs count as +1. If this threshold is exceeded, it's considered an * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED. * * By default, most systems will only count long IRQs towards - * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also + * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems also * suffer from short IRQ storms and must also track these. Because short IRQ * storms are naturally caused by sideband interactions with DP MST devices, * short IRQ detection is only enabled for systems without DP MST support. 
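The storm accounting this kernel-doc describes is compact enough to sketch outside the driver; the hunk that follows only renames the fields it touches. Below is a minimal, self-contained C rendering of the stated rules — long pulses weigh +10, short pulses +1, the count restarts once the detection window lapses, and short pulses count only where short-storm tracking was opted in. The struct and function names are illustrative stand-ins, not the driver's API, and the constants are assumptions mirroring HPD_STORM_DETECT_PERIOD and HPD_STORM_DEFAULT_THRESHOLD.

#include <stdbool.h>
#include <stdint.h>

#define STORM_PERIOD_MS  1000	/* assumed: mirrors HPD_STORM_DETECT_PERIOD */
#define STORM_THRESHOLD  50	/* assumed: mirrors HPD_STORM_DEFAULT_THRESHOLD */

struct hpd_pin_stats {
	uint64_t window_start_ms;	/* start of the current detection window */
	int count;			/* weighted IRQ count within the window */
};

/* Returns true when the weighted count within one window exceeds the threshold. */
static bool hpd_storm_detect(struct hpd_pin_stats *s, uint64_t now_ms,
			     bool long_hpd, bool short_storm_enabled)
{
	const int increment = long_hpd ? 10 : 1;

	/* Short pulses count only on systems that track short IRQ storms. */
	if (!long_hpd && !short_storm_enabled)
		return false;

	/* Detection window expired: restart the count from this pulse. */
	if (now_ms - s->window_start_ms > STORM_PERIOD_MS) {
		s->window_start_ms = now_ms;
		s->count = 0;
	}

	s->count += increment;
	return s->count > STORM_THRESHOLD;
}

Note the lazy window reset: the window restarts on the next pulse rather than via a timer, which suits the driver's situation — a storm only matters while pulses are actually arriving.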
@@ -140,7 +140,7 @@ intel_connector_hpd_pin(struct intel_connector *connector) static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, enum hpd_pin pin, bool long_hpd) { - struct i915_hotplug *hpd = &dev_priv->hotplug; + struct intel_hotplug *hpd = &dev_priv->display.hotplug; unsigned long start = hpd->stats[pin].last_jiffies; unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD); const int increment = long_hpd ? 10 : 1; @@ -148,7 +148,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, bool storm = false; if (!threshold || - (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled)) + (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled)) return false; if (!time_in_range(jiffies, start, end)) { @@ -191,7 +191,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) pin = intel_connector_hpd_pin(connector); if (pin == HPD_NONE || - dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED) + dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED) continue; drm_info(&dev_priv->drm, @@ -199,7 +199,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) "switching from hotplug detection to polling\n", connector->base.name); - dev_priv->hotplug.stats[pin].state = HPD_DISABLED; + dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED; connector->base.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; hpd_disabled = true; @@ -209,7 +209,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) /* Enable polling and queue hotplug re-enabling. */ if (hpd_disabled) { drm_kms_helper_poll_enable(dev); - mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, + mod_delayed_work(system_wq, &dev_priv->display.hotplug.reenable_work, msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); } } @@ -218,7 +218,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, typeof(*dev_priv), - hotplug.reenable_work.work); + display.hotplug.reenable_work.work); struct drm_device *dev = &dev_priv->drm; struct drm_connector_list_iter conn_iter; struct intel_connector *connector; @@ -233,7 +233,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) for_each_intel_connector_iter(connector, &conn_iter) { pin = intel_connector_hpd_pin(connector); if (pin == HPD_NONE || - dev_priv->hotplug.stats[pin].state != HPD_DISABLED) + dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED) continue; if (connector->base.polled != connector->polled) @@ -245,8 +245,8 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) drm_connector_list_iter_end(&conn_iter); for_each_hpd_pin(pin) { - if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) - dev_priv->hotplug.stats[pin].state = HPD_ENABLED; + if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) + dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED; } intel_hpd_irq_setup(dev_priv); @@ -297,16 +297,16 @@ static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder) static void i915_digport_work_func(struct work_struct *work) { struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, hotplug.dig_port_work); + container_of(work, struct drm_i915_private, display.hotplug.dig_port_work); u32 long_port_mask, short_port_mask; struct intel_encoder *encoder; u32 old_bits = 0; spin_lock_irq(&dev_priv->irq_lock); - long_port_mask = dev_priv->hotplug.long_port_mask; - 
dev_priv->hotplug.long_port_mask = 0; - short_port_mask = dev_priv->hotplug.short_port_mask; - dev_priv->hotplug.short_port_mask = 0; + long_port_mask = dev_priv->display.hotplug.long_port_mask; + dev_priv->display.hotplug.long_port_mask = 0; + short_port_mask = dev_priv->display.hotplug.short_port_mask; + dev_priv->display.hotplug.short_port_mask = 0; spin_unlock_irq(&dev_priv->irq_lock); for_each_intel_encoder(&dev_priv->drm, encoder) { @@ -335,9 +335,9 @@ static void i915_digport_work_func(struct work_struct *work) if (old_bits) { spin_lock_irq(&dev_priv->irq_lock); - dev_priv->hotplug.event_bits |= old_bits; + dev_priv->display.hotplug.event_bits |= old_bits; spin_unlock_irq(&dev_priv->irq_lock); - queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0); + queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0); } } @@ -353,10 +353,10 @@ void intel_hpd_trigger_irq(struct intel_digital_port *dig_port) struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); spin_lock_irq(&i915->irq_lock); - i915->hotplug.short_port_mask |= BIT(dig_port->base.port); + i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port); spin_unlock_irq(&i915->irq_lock); - queue_work(i915->hotplug.dp_wq, &i915->hotplug.dig_port_work); + queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work); } /* @@ -366,7 +366,7 @@ static void i915_hotplug_work_func(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, - hotplug.hotplug_work.work); + display.hotplug.hotplug_work.work); struct drm_device *dev = &dev_priv->drm; struct drm_connector_list_iter conn_iter; struct intel_connector *connector; @@ -379,10 +379,10 @@ static void i915_hotplug_work_func(struct work_struct *work) spin_lock_irq(&dev_priv->irq_lock); - hpd_event_bits = dev_priv->hotplug.event_bits; - dev_priv->hotplug.event_bits = 0; - hpd_retry_bits = dev_priv->hotplug.retry_bits; - dev_priv->hotplug.retry_bits = 0; + hpd_event_bits = dev_priv->display.hotplug.event_bits; + dev_priv->display.hotplug.event_bits = 0; + hpd_retry_bits = dev_priv->display.hotplug.retry_bits; + dev_priv->display.hotplug.retry_bits = 0; /* Enable polling for connectors which had HPD IRQ storms */ intel_hpd_irq_storm_switch_to_polling(dev_priv); @@ -435,10 +435,10 @@ static void i915_hotplug_work_func(struct work_struct *work) retry &= ~changed; if (retry) { spin_lock_irq(&dev_priv->irq_lock); - dev_priv->hotplug.retry_bits |= retry; + dev_priv->display.hotplug.retry_bits |= retry; spin_unlock_irq(&dev_priv->irq_lock); - mod_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, + mod_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, msecs_to_jiffies(HPD_RETRY_DELAY)); } } @@ -502,10 +502,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, if (long_hpd) { long_hpd_pulse_mask |= BIT(pin); - dev_priv->hotplug.long_port_mask |= BIT(port); + dev_priv->display.hotplug.long_port_mask |= BIT(port); } else { short_hpd_pulse_mask |= BIT(pin); - dev_priv->hotplug.short_port_mask |= BIT(port); + dev_priv->display.hotplug.short_port_mask |= BIT(port); } } @@ -516,7 +516,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, if (!(BIT(pin) & pin_mask)) continue; - if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) { + if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) { /* * On GMCH platforms the interrupt mask bits only * prevent irq generation, not the setting of the @@ -529,7 +529,7 @@ void 
intel_hpd_irq_handler(struct drm_i915_private *dev_priv, continue; } - if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED) + if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED) continue; /* @@ -540,13 +540,13 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) { long_hpd = long_hpd_pulse_mask & BIT(pin); } else { - dev_priv->hotplug.event_bits |= BIT(pin); + dev_priv->display.hotplug.event_bits |= BIT(pin); long_hpd = true; queue_hp = true; } if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) { - dev_priv->hotplug.event_bits &= ~BIT(pin); + dev_priv->display.hotplug.event_bits &= ~BIT(pin); storm_detected = true; queue_hp = true; } @@ -567,9 +567,9 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, * deadlock. */ if (queue_dig) - queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work); + queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work); if (queue_hp) - queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0); + queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0); } /** @@ -594,8 +594,8 @@ void intel_hpd_init(struct drm_i915_private *dev_priv) return; for_each_hpd_pin(i) { - dev_priv->hotplug.stats[i].count = 0; - dev_priv->hotplug.stats[i].state = HPD_ENABLED; + dev_priv->display.hotplug.stats[i].count = 0; + dev_priv->display.hotplug.stats[i].state = HPD_ENABLED; } /* @@ -611,7 +611,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, - hotplug.poll_init_work); + display.hotplug.poll_init_work); struct drm_device *dev = &dev_priv->drm; struct drm_connector_list_iter conn_iter; struct intel_connector *connector; @@ -619,7 +619,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work) mutex_lock(&dev->mode_config.mutex); - enabled = READ_ONCE(dev_priv->hotplug.poll_enabled); + enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled); drm_connector_list_iter_begin(dev, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { @@ -672,7 +672,7 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv) !INTEL_DISPLAY_ENABLED(dev_priv)) return; - WRITE_ONCE(dev_priv->hotplug.poll_enabled, true); + WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true); /* * We might already be holding dev->mode_config.mutex, so do this in a @@ -680,7 +680,7 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv) * As well, there's no issue if we race here since we always reschedule * this worker anyway */ - schedule_work(&dev_priv->hotplug.poll_init_work); + schedule_work(&dev_priv->display.hotplug.poll_init_work); } /** @@ -707,17 +707,17 @@ void intel_hpd_poll_disable(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; - WRITE_ONCE(dev_priv->hotplug.poll_enabled, false); - schedule_work(&dev_priv->hotplug.poll_init_work); + WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false); + schedule_work(&dev_priv->display.hotplug.poll_init_work); } void intel_hpd_init_work(struct drm_i915_private *dev_priv) { - INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work, + INIT_DELAYED_WORK(&dev_priv->display.hotplug.hotplug_work, i915_hotplug_work_func); - INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func); - INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work); - INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work, + 
INIT_WORK(&dev_priv->display.hotplug.dig_port_work, i915_digport_work_func); + INIT_WORK(&dev_priv->display.hotplug.poll_init_work, i915_hpd_poll_init_work); + INIT_DELAYED_WORK(&dev_priv->display.hotplug.reenable_work, intel_hpd_irq_storm_reenable_work); } @@ -728,17 +728,17 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) spin_lock_irq(&dev_priv->irq_lock); - dev_priv->hotplug.long_port_mask = 0; - dev_priv->hotplug.short_port_mask = 0; - dev_priv->hotplug.event_bits = 0; - dev_priv->hotplug.retry_bits = 0; + dev_priv->display.hotplug.long_port_mask = 0; + dev_priv->display.hotplug.short_port_mask = 0; + dev_priv->display.hotplug.event_bits = 0; + dev_priv->display.hotplug.retry_bits = 0; spin_unlock_irq(&dev_priv->irq_lock); - cancel_work_sync(&dev_priv->hotplug.dig_port_work); - cancel_delayed_work_sync(&dev_priv->hotplug.hotplug_work); - cancel_work_sync(&dev_priv->hotplug.poll_init_work); - cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work); + cancel_work_sync(&dev_priv->display.hotplug.dig_port_work); + cancel_delayed_work_sync(&dev_priv->display.hotplug.hotplug_work); + cancel_work_sync(&dev_priv->display.hotplug.poll_init_work); + cancel_delayed_work_sync(&dev_priv->display.hotplug.reenable_work); } bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin) @@ -749,8 +749,8 @@ bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin) return false; spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) { - dev_priv->hotplug.stats[pin].state = HPD_DISABLED; + if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) { + dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED; ret = true; } spin_unlock_irq(&dev_priv->irq_lock); @@ -764,6 +764,6 @@ void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin) return; spin_lock_irq(&dev_priv->irq_lock); - dev_priv->hotplug.stats[pin].state = HPD_ENABLED; + dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED; spin_unlock_irq(&dev_priv->irq_lock); } diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c index 4970bf146c4a..dca6003ccac8 100644 --- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c @@ -73,8 +73,9 @@ #include "i915_drv.h" #include "intel_de.h" #include "intel_lpe_audio.h" +#include "intel_pci_config.h" -#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->audio.lpe.platdev != NULL) +#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->display.audio.lpe.platdev != NULL) static struct platform_device * lpe_audio_platdev_create(struct drm_i915_private *dev_priv) @@ -96,13 +97,13 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv) return ERR_PTR(-ENOMEM); } - rsc[0].start = rsc[0].end = dev_priv->audio.lpe.irq; + rsc[0].start = rsc[0].end = dev_priv->display.audio.lpe.irq; rsc[0].flags = IORESOURCE_IRQ; rsc[0].name = "hdmi-lpe-audio-irq"; - rsc[1].start = pci_resource_start(pdev, 0) + + rsc[1].start = pci_resource_start(pdev, GTTMMADR_BAR) + I915_HDMI_LPE_AUDIO_BASE; - rsc[1].end = pci_resource_start(pdev, 0) + + rsc[1].end = pci_resource_start(pdev, GTTMMADR_BAR) + I915_HDMI_LPE_AUDIO_BASE + I915_HDMI_LPE_AUDIO_SIZE - 1; rsc[1].flags = IORESOURCE_MEM; rsc[1].name = "hdmi-lpe-audio-mmio"; @@ -148,7 +149,7 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv) * than us fiddle with its internals. 
*/ - platform_device_unregister(dev_priv->audio.lpe.platdev); + platform_device_unregister(dev_priv->display.audio.lpe.platdev); } static void lpe_audio_irq_unmask(struct irq_data *d) @@ -167,7 +168,7 @@ static struct irq_chip lpe_audio_irqchip = { static int lpe_audio_irq_init(struct drm_i915_private *dev_priv) { - int irq = dev_priv->audio.lpe.irq; + int irq = dev_priv->display.audio.lpe.irq; drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); irq_set_chip_and_handler_name(irq, @@ -204,15 +205,15 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv) { int ret; - dev_priv->audio.lpe.irq = irq_alloc_desc(0); - if (dev_priv->audio.lpe.irq < 0) { + dev_priv->display.audio.lpe.irq = irq_alloc_desc(0); + if (dev_priv->display.audio.lpe.irq < 0) { drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n", - dev_priv->audio.lpe.irq); - ret = dev_priv->audio.lpe.irq; + dev_priv->display.audio.lpe.irq); + ret = dev_priv->display.audio.lpe.irq; goto err; } - drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->audio.lpe.irq); + drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->display.audio.lpe.irq); ret = lpe_audio_irq_init(dev_priv); @@ -223,10 +224,10 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv) goto err_free_irq; } - dev_priv->audio.lpe.platdev = lpe_audio_platdev_create(dev_priv); + dev_priv->display.audio.lpe.platdev = lpe_audio_platdev_create(dev_priv); - if (IS_ERR(dev_priv->audio.lpe.platdev)) { - ret = PTR_ERR(dev_priv->audio.lpe.platdev); + if (IS_ERR(dev_priv->display.audio.lpe.platdev)) { + ret = PTR_ERR(dev_priv->display.audio.lpe.platdev); drm_err(&dev_priv->drm, "Failed to create lpe audio platform device: %d\n", ret); @@ -241,10 +242,10 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv) return 0; err_free_irq: - irq_free_desc(dev_priv->audio.lpe.irq); + irq_free_desc(dev_priv->display.audio.lpe.irq); err: - dev_priv->audio.lpe.irq = -1; - dev_priv->audio.lpe.platdev = NULL; + dev_priv->display.audio.lpe.irq = -1; + dev_priv->display.audio.lpe.platdev = NULL; return ret; } @@ -262,7 +263,7 @@ void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv) if (!HAS_LPE_AUDIO(dev_priv)) return; - ret = generic_handle_irq(dev_priv->audio.lpe.irq); + ret = generic_handle_irq(dev_priv->display.audio.lpe.irq); if (ret) drm_err_ratelimited(&dev_priv->drm, "error handling LPE audio irq: %d\n", ret); @@ -303,10 +304,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv) lpe_audio_platdev_destroy(dev_priv); - irq_free_desc(dev_priv->audio.lpe.irq); + irq_free_desc(dev_priv->display.audio.lpe.irq); - dev_priv->audio.lpe.irq = -1; - dev_priv->audio.lpe.platdev = NULL; + dev_priv->display.audio.lpe.irq = -1; + dev_priv->display.audio.lpe.platdev = NULL; } /** @@ -333,7 +334,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, if (!HAS_LPE_AUDIO(dev_priv)) return; - pdata = dev_get_platdata(&dev_priv->audio.lpe.platdev->dev); + pdata = dev_get_platdata(&dev_priv->display.audio.lpe.platdev->dev); ppdata = &pdata->port[port - PORT_B]; spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags); @@ -361,7 +362,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, } if (pdata->notify_audio_lpe) - pdata->notify_audio_lpe(dev_priv->audio.lpe.platdev, port - PORT_B); + pdata->notify_audio_lpe(dev_priv->display.audio.lpe.platdev, port - PORT_B); spin_unlock_irqrestore(&pdata->lpe_audio_slock, irqflags); } diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index 
730480ac3300..9aa38e8141b5 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -837,12 +837,12 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) /* Skip init on machines we know falsely report LVDS */ if (dmi_check_system(intel_no_lvds)) { - drm_WARN(dev, !dev_priv->vbt.int_lvds_support, + drm_WARN(dev, !dev_priv->display.vbt.int_lvds_support, "Useless DMI match. Internal LVDS support disabled by VBT\n"); return; } - if (!dev_priv->vbt.int_lvds_support) { + if (!dev_priv->display.vbt.int_lvds_support) { drm_dbg_kms(&dev_priv->drm, "Internal LVDS support disabled by VBT\n"); return; diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c index f0e04d3904c6..cbfabd58b75a 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c @@ -23,6 +23,7 @@ #include "intel_modeset_setup.h" #include "intel_pch_display.h" #include "intel_pm.h" +#include "skl_watermark.h" static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, struct drm_modeset_acquire_ctx *ctx) @@ -30,11 +31,11 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, struct intel_encoder *encoder; struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_bw_state *bw_state = - to_intel_bw_state(i915->bw_obj.state); + to_intel_bw_state(i915->display.bw.obj.state); struct intel_cdclk_state *cdclk_state = - to_intel_cdclk_state(i915->cdclk.obj.state); + to_intel_cdclk_state(i915->display.cdclk.obj.state); struct intel_dbuf_state *dbuf_state = - to_intel_dbuf_state(i915->dbuf.obj.state); + to_intel_dbuf_state(i915->display.dbuf.obj.state); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane *plane; @@ -70,7 +71,7 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, drm_WARN_ON(&i915->drm, IS_ERR(temp_crtc_state) || ret); - i915->display->crtc_disable(to_intel_atomic_state(state), crtc); + i915->display.funcs.display->crtc_disable(to_intel_atomic_state(state), crtc); drm_atomic_state_put(state); @@ -415,9 +416,9 @@ static void readout_plane_state(struct drm_i915_private *i915) static void intel_modeset_readout_hw_state(struct drm_i915_private *i915) { struct intel_cdclk_state *cdclk_state = - to_intel_cdclk_state(i915->cdclk.obj.state); + to_intel_cdclk_state(i915->display.cdclk.obj.state); struct intel_dbuf_state *dbuf_state = - to_intel_dbuf_state(i915->dbuf.obj.state); + to_intel_dbuf_state(i915->display.dbuf.obj.state); enum pipe pipe; struct intel_crtc *crtc; struct intel_encoder *encoder; @@ -535,7 +536,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915) for_each_intel_crtc(&i915->drm, crtc) { struct intel_bw_state *bw_state = - to_intel_bw_state(i915->bw_obj.state); + to_intel_bw_state(i915->display.bw.obj.state); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane *plane; diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c index a91586d77cb6..0fdcf2e6d57f 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c @@ -15,8 +15,8 @@ #include "intel_display_types.h" #include "intel_fdi.h" #include "intel_modeset_verify.h" -#include "intel_pm.h" #include "intel_snps_phy.h" +#include "skl_watermark.h" /* * Cross check the actual hw state with our own modeset state 
tracking (and its @@ -94,10 +94,10 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, /* * FDI already provided one idea for the dotclock. - * Yell if the encoder disagrees. + * Yell if the encoder disagrees. Allow for slight + * rounding differences. */ - drm_WARN(&dev_priv->drm, - !intel_fuzzy_clock_check(fdi_dotclock, dotclock), + drm_WARN(&dev_priv->drm, abs(fdi_dotclock - dotclock) > 1, "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", fdi_dotclock, dotclock); } diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index 1c0c745c142d..caa07ef34f21 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -252,7 +252,7 @@ struct opregion_asle_ext { static int check_swsci_function(struct drm_i915_private *i915, u32 function) { - struct opregion_swsci *swsci = i915->opregion.swsci; + struct opregion_swsci *swsci = i915->display.opregion.swsci; u32 main_function, sub_function; if (!swsci) @@ -265,11 +265,11 @@ static int check_swsci_function(struct drm_i915_private *i915, u32 function) /* Check if we can call the function. See swsci_setup for details. */ if (main_function == SWSCI_SBCB) { - if ((i915->opregion.swsci_sbcb_sub_functions & + if ((i915->display.opregion.swsci_sbcb_sub_functions & (1 << sub_function)) == 0) return -EINVAL; } else if (main_function == SWSCI_GBDA) { - if ((i915->opregion.swsci_gbda_sub_functions & + if ((i915->display.opregion.swsci_gbda_sub_functions & (1 << sub_function)) == 0) return -EINVAL; } @@ -280,7 +280,7 @@ static int check_swsci_function(struct drm_i915_private *i915, u32 function) static int swsci(struct drm_i915_private *dev_priv, u32 function, u32 parm, u32 *parm_out) { - struct opregion_swsci *swsci = dev_priv->opregion.swsci; + struct opregion_swsci *swsci = dev_priv->display.opregion.swsci; struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); u32 scic, dslp; u16 swsci_val; @@ -462,7 +462,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp) { struct intel_connector *connector; struct drm_connector_list_iter conn_iter; - struct opregion_asle *asle = dev_priv->opregion.asle; + struct opregion_asle *asle = dev_priv->display.opregion.asle; struct drm_device *dev = &dev_priv->drm; drm_dbg(&dev_priv->drm, "bclp = 0x%08x\n", bclp); @@ -586,8 +586,8 @@ static void asle_work(struct work_struct *work) struct intel_opregion *opregion = container_of(work, struct intel_opregion, asle_work); struct drm_i915_private *dev_priv = - container_of(opregion, struct drm_i915_private, opregion); - struct opregion_asle *asle = dev_priv->opregion.asle; + container_of(opregion, struct drm_i915_private, display.opregion); + struct opregion_asle *asle = dev_priv->display.opregion.asle; u32 aslc_stat = 0; u32 aslc_req; @@ -635,8 +635,8 @@ static void asle_work(struct work_struct *work) void intel_opregion_asle_intr(struct drm_i915_private *dev_priv) { - if (dev_priv->opregion.asle) - schedule_work(&dev_priv->opregion.asle_work); + if (dev_priv->display.opregion.asle) + schedule_work(&dev_priv->display.opregion.asle_work); } #define ACPI_EV_DISPLAY_SWITCH (1<<0) @@ -692,7 +692,7 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val) static void intel_didl_outputs(struct drm_i915_private *dev_priv) { - struct intel_opregion *opregion = &dev_priv->opregion; + struct intel_opregion *opregion = &dev_priv->display.opregion; struct intel_connector *connector; struct 
drm_connector_list_iter conn_iter; int i = 0, max_outputs; @@ -731,7 +731,7 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv) static void intel_setup_cadls(struct drm_i915_private *dev_priv) { - struct intel_opregion *opregion = &dev_priv->opregion; + struct intel_opregion *opregion = &dev_priv->display.opregion; struct intel_connector *connector; struct drm_connector_list_iter conn_iter; int i = 0; @@ -761,7 +761,7 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv) static void swsci_setup(struct drm_i915_private *dev_priv) { - struct intel_opregion *opregion = &dev_priv->opregion; + struct intel_opregion *opregion = &dev_priv->display.opregion; bool requested_callbacks = false; u32 tmp; @@ -839,7 +839,7 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = { static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv) { - struct intel_opregion *opregion = &dev_priv->opregion; + struct intel_opregion *opregion = &dev_priv->display.opregion; const struct firmware *fw = NULL; const char *name = dev_priv->params.vbt_firmware; int ret; @@ -879,7 +879,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv) int intel_opregion_setup(struct drm_i915_private *dev_priv) { - struct intel_opregion *opregion = &dev_priv->opregion; + struct intel_opregion *opregion = &dev_priv->display.opregion; struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); u32 asls, mboxes; char buf[sizeof(OPREGION_SIGNATURE)]; @@ -1106,7 +1106,7 @@ struct edid *intel_opregion_get_edid(struct intel_connector *intel_connector) { struct drm_connector *connector = &intel_connector->base; struct drm_i915_private *i915 = to_i915(connector->dev); - struct intel_opregion *opregion = &i915->opregion; + struct intel_opregion *opregion = &i915->display.opregion; const void *in_edid; const struct edid *edid; struct edid *new_edid; @@ -1141,7 +1141,7 @@ struct edid *intel_opregion_get_edid(struct intel_connector *intel_connector) bool intel_opregion_headless_sku(struct drm_i915_private *i915) { - struct intel_opregion *opregion = &i915->opregion; + struct intel_opregion *opregion = &i915->display.opregion; struct opregion_header *header = opregion->header; if (!header || header->over.major < 2 || @@ -1153,7 +1153,7 @@ bool intel_opregion_headless_sku(struct drm_i915_private *i915) void intel_opregion_register(struct drm_i915_private *i915) { - struct intel_opregion *opregion = &i915->opregion; + struct intel_opregion *opregion = &i915->display.opregion; if (!opregion->header) return; @@ -1169,7 +1169,7 @@ void intel_opregion_register(struct drm_i915_private *i915) void intel_opregion_resume(struct drm_i915_private *i915) { - struct intel_opregion *opregion = &i915->opregion; + struct intel_opregion *opregion = &i915->display.opregion; if (!opregion->header) return; @@ -1200,7 +1200,7 @@ void intel_opregion_resume(struct drm_i915_private *i915) void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state) { - struct intel_opregion *opregion = &i915->opregion; + struct intel_opregion *opregion = &i915->display.opregion; if (!opregion->header) return; @@ -1210,7 +1210,7 @@ void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state) if (opregion->asle) opregion->asle->ardy = ASLE_ARDY_NOT_READY; - cancel_work_sync(&i915->opregion.asle_work); + cancel_work_sync(&i915->display.opregion.asle_work); if (opregion->acpi) opregion->acpi->drdy = 0; @@ -1218,7 +1218,7 @@ void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state) 
void intel_opregion_unregister(struct drm_i915_private *i915) { - struct intel_opregion *opregion = &i915->opregion; + struct intel_opregion *opregion = &i915->display.opregion; intel_opregion_suspend(i915, PCI_D1); diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 79ed8bd04a07..c12bdca8da9b 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -211,9 +211,9 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv, /* WA_OVERLAY_CLKGATE:alm */ if (enable) - intel_de_write(dev_priv, DSPCLK_GATE_D, 0); + intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), 0); else - intel_de_write(dev_priv, DSPCLK_GATE_D, + intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), OVRUNIT_CLOCK_GATE_DISABLE); /* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */ @@ -487,7 +487,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) void intel_overlay_reset(struct drm_i915_private *dev_priv) { - struct intel_overlay *overlay = dev_priv->overlay; + struct intel_overlay *overlay = dev_priv->display.overlay; if (!overlay) return; @@ -1113,7 +1113,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *new_bo; int ret; - overlay = dev_priv->overlay; + overlay = dev_priv->display.overlay; if (!overlay) { drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n"); return -ENODEV; @@ -1273,7 +1273,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, struct intel_overlay *overlay; int ret; - overlay = dev_priv->overlay; + overlay = dev_priv->display.overlay; if (!overlay) { drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n"); return -ENODEV; @@ -1416,7 +1416,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv) update_polyphase_filter(overlay->regs); update_reg_attrs(overlay, overlay->regs); - dev_priv->overlay = overlay; + dev_priv->display.overlay = overlay; drm_info(&dev_priv->drm, "Initialized overlay support.\n"); return; @@ -1428,7 +1428,7 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv) { struct intel_overlay *overlay; - overlay = fetch_and_zero(&dev_priv->overlay); + overlay = fetch_and_zero(&dev_priv->display.overlay); if (!overlay) return; @@ -1457,7 +1457,7 @@ struct intel_overlay_error_state { struct intel_overlay_error_state * intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) { - struct intel_overlay *overlay = dev_priv->overlay; + struct intel_overlay *overlay = dev_priv->display.overlay; struct intel_overlay_error_state *error; if (!overlay || !overlay->active) diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 237a40623dd7..a3a3f9fe4342 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -37,13 +37,14 @@ #include "intel_display_types.h" #include "intel_drrs.h" #include "intel_panel.h" +#include "intel_quirks.h" bool intel_panel_use_ssc(struct drm_i915_private *i915) { if (i915->params.panel_use_ssc >= 0) return i915->params.panel_use_ssc != 0; - return i915->vbt.lvds_use_ssc - && !(i915->quirks & QUIRK_LVDS_SSC_DISABLE); + return i915->display.vbt.lvds_use_ssc && + !intel_has_quirk(i915, QUIRK_LVDS_SSC_DISABLE); } const struct drm_display_mode * @@ -81,15 +82,14 @@ static bool is_alt_drrs_mode(const struct drm_display_mode *mode, mode->clock != preferred_mode->clock; } -static bool is_alt_vrr_mode(const struct drm_display_mode *mode, - const struct 
drm_display_mode *preferred_mode) +static bool is_alt_fixed_mode(const struct drm_display_mode *mode, + const struct drm_display_mode *preferred_mode) { return drm_mode_match(mode, preferred_mode, DRM_MODE_MATCH_FLAGS | DRM_MODE_MATCH_3D_FLAGS) && mode->hdisplay == preferred_mode->hdisplay && - mode->vdisplay == preferred_mode->vdisplay && - mode->clock != preferred_mode->clock; + mode->vdisplay == preferred_mode->vdisplay; } const struct drm_display_mode * @@ -114,6 +114,21 @@ intel_panel_downclock_mode(struct intel_connector *connector, return best_mode; } +const struct drm_display_mode * +intel_panel_highest_mode(struct intel_connector *connector, + const struct drm_display_mode *adjusted_mode) +{ + const struct drm_display_mode *fixed_mode, *best_mode = adjusted_mode; + + /* pick the fixed_mode that has the highest clock */ + list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) { + if (fixed_mode->clock > best_mode->clock) + best_mode = fixed_mode; + } + + return best_mode; +} + int intel_panel_get_modes(struct intel_connector *connector) { const struct drm_display_mode *fixed_mode; @@ -172,19 +187,7 @@ int intel_panel_compute_config(struct intel_connector *connector, return 0; } -static bool is_alt_fixed_mode(const struct drm_display_mode *mode, - const struct drm_display_mode *preferred_mode, - bool has_vrr) -{ - /* is_alt_drrs_mode() is a subset of is_alt_vrr_mode() */ - if (has_vrr) - return is_alt_vrr_mode(mode, preferred_mode); - else - return is_alt_drrs_mode(mode, preferred_mode); -} - -static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connector, - bool has_vrr) +static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); const struct drm_display_mode *preferred_mode = @@ -192,7 +195,7 @@ static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connect struct drm_display_mode *mode, *next; list_for_each_entry_safe(mode, next, &connector->base.probed_modes, head) { - if (!is_alt_fixed_mode(mode, preferred_mode, has_vrr)) + if (!is_alt_fixed_mode(mode, preferred_mode)) continue; drm_dbg_kms(&dev_priv->drm, @@ -255,7 +258,7 @@ void intel_panel_add_edid_fixed_modes(struct intel_connector *connector, { intel_panel_add_edid_preferred_mode(connector); if (intel_panel_preferred_fixed_mode(connector) && (has_drrs || has_vrr)) - intel_panel_add_edid_alt_fixed_modes(connector, has_vrr); + intel_panel_add_edid_alt_fixed_modes(connector); intel_panel_destroy_probed_modes(connector); } diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h index b087c0c3cc6d..eff3ffd3d082 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.h +++ b/drivers/gpu/drm/i915/display/intel_panel.h @@ -31,6 +31,9 @@ intel_panel_fixed_mode(struct intel_connector *connector, const struct drm_display_mode * intel_panel_downclock_mode(struct intel_connector *connector, const struct drm_display_mode *adjusted_mode); +const struct drm_display_mode * +intel_panel_highest_mode(struct intel_connector *connector, + const struct drm_display_mode *adjusted_mode); int intel_panel_get_modes(struct intel_connector *connector); enum drrs_type intel_panel_drrs_type(struct intel_connector *connector); enum drm_mode_status diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c index 9934c8a9e240..a66097cdc1e0 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c +++ 
b/drivers/gpu/drm/i915/display/intel_pch_refclk.c @@ -167,6 +167,15 @@ static void lpt_compute_iclkip(struct iclkip_params *p, int clock) } } +int lpt_iclkip(const struct intel_crtc_state *crtc_state) +{ + struct iclkip_params p; + + lpt_compute_iclkip(&p, crtc_state->hw.adjusted_mode.crtc_clock); + + return lpt_iclkip_freq(&p); +} + /* Program iCLKIP clock to the desired frequency */ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) { @@ -179,6 +188,7 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) lpt_disable_iclkip(dev_priv); lpt_compute_iclkip(&p, clock); + drm_WARN_ON(&dev_priv->drm, lpt_iclkip_freq(&p) != clock); /* This should not happen with any sane values */ drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) & @@ -514,7 +524,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) } if (HAS_PCH_IBX(dev_priv)) { - has_ck505 = dev_priv->vbt.display_clock_mode; + has_ck505 = dev_priv->display.vbt.display_clock_mode; can_ssc = has_ck505; } else { has_ck505 = false; @@ -522,7 +532,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) } /* Check if any DPLLs are using the SSC source */ - for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { + for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) { u32 temp = intel_de_read(dev_priv, PCH_DPLL(i)); if (!(temp & DPLL_VCO_ENABLE)) @@ -654,7 +664,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) } } - BUG_ON(val != final); + drm_WARN_ON(&dev_priv->drm, val != final); } /* diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.h b/drivers/gpu/drm/i915/display/intel_pch_refclk.h index 12ab2c75a800..9bcf56629f24 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_refclk.h +++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.h @@ -14,6 +14,7 @@ struct intel_crtc_state; void lpt_program_iclkip(const struct intel_crtc_state *crtc_state); void lpt_disable_iclkip(struct drm_i915_private *dev_priv); int lpt_get_iclkip(struct drm_i915_private *dev_priv); +int lpt_iclkip(const struct intel_crtc_state *crtc_state); void intel_init_pch_refclk(struct drm_i915_private *dev_priv); void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c index d10f27d0b7b0..76be796df255 100644 --- a/drivers/gpu/drm/i915/display/intel_plane_initial.c +++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c @@ -311,7 +311,7 @@ void intel_crtc_initial_plane_config(struct intel_crtc *crtc) * can even allow for smooth boot transitions if the BIOS * fb is large enough for the active pipe configuration. */ - dev_priv->display->get_initial_plane_config(crtc, &plane_config); + dev_priv->display.funcs.display->get_initial_plane_config(crtc, &plane_config); /* * If the fb is shared between multiple heads, we'll diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index 1b21a341962f..21944f5bf3a8 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -12,6 +12,7 @@ #include "intel_dpll.h" #include "intel_lvds.h" #include "intel_pps.h" +#include "intel_quirks.h" static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, enum pipe pipe); @@ -28,7 +29,7 @@ intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp) * See intel_pps_reset_all() why we need a power domain reference here. 
*/ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE); - mutex_lock(&dev_priv->pps_mutex); + mutex_lock(&dev_priv->display.pps.mutex); return wakeref; } @@ -38,7 +39,7 @@ intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp, { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - mutex_unlock(&dev_priv->pps_mutex); + mutex_unlock(&dev_priv->display.pps.mutex); intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); return 0; @@ -163,7 +164,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum pipe pipe; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); /* We should never land here with regular DP ports */ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp)); @@ -212,7 +213,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp) struct intel_connector *connector = intel_dp->attached_connector; int backlight_controller = connector->panel.vbt.backlight.controller; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); /* We should never land here with regular DP ports */ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp)); @@ -282,7 +283,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum port port = dig_port->base.port; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); /* try to find a pipe with this port selected */ /* first pick one where the panel is on */ @@ -407,7 +408,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && intel_dp->pps.pps_pipe == INVALID_PIPE) @@ -420,7 +421,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && intel_dp->pps.pps_pipe == INVALID_PIPE) @@ -463,7 +464,7 @@ static void wait_panel_status(struct intel_dp *intel_dp, struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); i915_reg_t pp_stat_reg, pp_ctrl_reg; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); intel_pps_verify_state(intel_dp); @@ -556,7 +557,7 @@ static u32 ilk_get_pp_control(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 control; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)); if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) && @@ -580,7 +581,7 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp) i915_reg_t pp_stat_reg, pp_ctrl_reg; bool need_to_disable = !intel_dp->pps.want_panel_vdd; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); if (!intel_dp_is_edp(intel_dp)) return false; @@ -657,7 +658,7 @@ static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp) u32 pp; i915_reg_t pp_stat_reg, pp_ctrl_reg; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); drm_WARN_ON(&dev_priv->drm, 
intel_dp->pps.want_panel_vdd); @@ -748,7 +749,7 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); if (!intel_dp_is_edp(intel_dp)) return; @@ -771,7 +772,7 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp) u32 pp; i915_reg_t pp_ctrl_reg; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); if (!intel_dp_is_edp(intel_dp)) return; @@ -832,7 +833,7 @@ void intel_pps_off_unlocked(struct intel_dp *intel_dp) u32 pp; i915_reg_t pp_ctrl_reg; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); if (!intel_dp_is_edp(intel_dp)) return; @@ -991,7 +992,7 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, { struct intel_encoder *encoder; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); for_each_intel_dp(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -1021,7 +1022,7 @@ void vlv_pps_init(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE); @@ -1064,7 +1065,7 @@ static void pps_vdd_init(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); if (!edp_have_panel_vdd(intel_dp)) return; @@ -1176,7 +1177,7 @@ static void pps_init_delays_bios(struct intel_dp *intel_dp, { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays)) intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays); @@ -1202,7 +1203,7 @@ static void pps_init_delays_vbt(struct intel_dp *intel_dp, * just fails to power back on. Increasing the delay to 800ms * seems sufficient to avoid this problem. */ - if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { + if (intel_has_quirk(dev_priv, QUIRK_INCREASE_T12_DELAY)) { vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10); drm_dbg_kms(&dev_priv->drm, "Increasing T12 panel delay as per the quirk to %d\n", @@ -1223,7 +1224,7 @@ static void pps_init_delays_spec(struct intel_dp *intel_dp, { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of * our hw here, which are all in 100usec. */ @@ -1246,7 +1247,7 @@ static void pps_init_delays(struct intel_dp *intel_dp) struct edp_power_seq cur, vbt, spec, *final = &intel_dp->pps.pps_delays; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); /* already initialized? 
*/ if (pps_delays_valid(final)) @@ -1312,7 +1313,7 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd enum port port = dp_to_dig_port(intel_dp)->base.port; const struct edp_power_seq *seq = &intel_dp->pps.pps_delays; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); intel_pps_get_registers(intel_dp, ®s); @@ -1487,11 +1488,11 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv) void intel_pps_setup(struct drm_i915_private *i915) { if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915)) - i915->pps_mmio_base = PCH_PPS_BASE; + i915->display.pps.mmio_base = PCH_PPS_BASE; else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) - i915->pps_mmio_base = VLV_PPS_BASE; + i915->display.pps.mmio_base = VLV_PPS_BASE; else - i915->pps_mmio_base = PPS_BASE; + i915->display.pps.mmio_base = PPS_BASE; } void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index e6a870641cd2..9def8d9fade6 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -706,7 +706,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, if (crtc_state->enable_psr2_sel_fetch) return; - if (!(dev_priv->dmc.allowed_dc_mask & DC_STATE_EN_DC3CO)) + if (!(dev_priv->display.dmc.allowed_dc_mask & DC_STATE_EN_DC3CO)) return; if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state)) @@ -805,13 +805,14 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start; hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock); - /* From spec: (72 / number of lanes) * 1000 / symbol clock frequency MHz */ - req_ns = (72 / crtc_state->lane_count) * 1000 / (crtc_state->port_clock / 1000); + /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */ + req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000); if ((hblank_ns - req_ns) > 100) return true; - if (DISPLAY_VER(dev_priv) < 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b) + /* Not supported <13 / Wa_22012279113:adl-p */ + if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b) return false; crtc_state->req_psr2_sdp_prior_scanline = true; @@ -1721,8 +1722,6 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, new_plane_state, i) { struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1, .x2 = INT_MAX }; - struct drm_atomic_helper_damage_iter iter; - struct drm_rect clip; if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc) continue; @@ -1767,22 +1766,18 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, continue; } - drm_rect_fp_to_int(&src, &new_plane_state->uapi.src); + src = drm_plane_state_src(&new_plane_state->uapi); + drm_rect_fp_to_int(&src, &src); - drm_atomic_helper_damage_iter_init(&iter, - &old_plane_state->uapi, - &new_plane_state->uapi); - drm_atomic_for_each_plane_damage(&iter, &clip) { - if (drm_rect_intersect(&clip, &src)) - clip_area_update(&damaged_area, &clip, - &crtc_state->pipe_src); - } - - if (damaged_area.y1 == -1) + if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi, + &new_plane_state->uapi, &damaged_area)) continue; damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1; damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1; + damaged_area.x1 += 
new_plane_state->uapi.dst.x1 - src.x1; + damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1; + clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src); } @@ -1863,7 +1858,9 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); - const struct intel_crtc_state *crtc_state = + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder; @@ -1871,7 +1868,7 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state, return; for_each_intel_encoder_mask_with_psr(state->base.dev, encoder, - crtc_state->uapi.encoder_mask) { + old_crtc_state->uapi.encoder_mask) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_psr *psr = &intel_dp->psr; bool needs_to_disable = false; @@ -1884,10 +1881,10 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state, * - All planes will go inactive * - Changing between PSR versions */ - needs_to_disable |= intel_crtc_needs_modeset(crtc_state); - needs_to_disable |= !crtc_state->has_psr; - needs_to_disable |= !crtc_state->active_planes; - needs_to_disable |= crtc_state->has_psr2 != psr->psr2_enabled; + needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state); + needs_to_disable |= !new_crtc_state->has_psr; + needs_to_disable |= !new_crtc_state->active_planes; + needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled; if (psr->enabled && needs_to_disable) intel_psr_disable_locked(intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c index c8488f5ebd04..6e48d3bcdfec 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.c +++ b/drivers/gpu/drm/i915/display/intel_quirks.c @@ -9,12 +9,17 @@ #include "intel_display_types.h" #include "intel_quirks.h" +static void intel_set_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk) +{ + i915->display.quirks.mask |= BIT(quirk); +} + /* * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason */ static void quirk_ssc_force_disable(struct drm_i915_private *i915) { - i915->quirks |= QUIRK_LVDS_SSC_DISABLE; + intel_set_quirk(i915, QUIRK_LVDS_SSC_DISABLE); drm_info(&i915->drm, "applying lvds SSC disable quirk\n"); } @@ -24,14 +29,14 @@ static void quirk_ssc_force_disable(struct drm_i915_private *i915) */ static void quirk_invert_brightness(struct drm_i915_private *i915) { - i915->quirks |= QUIRK_INVERT_BRIGHTNESS; + intel_set_quirk(i915, QUIRK_INVERT_BRIGHTNESS); drm_info(&i915->drm, "applying inverted panel brightness quirk\n"); } /* Some VBT's incorrectly indicate no backlight is present */ static void quirk_backlight_present(struct drm_i915_private *i915) { - i915->quirks |= QUIRK_BACKLIGHT_PRESENT; + intel_set_quirk(i915, QUIRK_BACKLIGHT_PRESENT); drm_info(&i915->drm, "applying backlight present quirk\n"); } @@ -40,7 +45,7 @@ static void quirk_backlight_present(struct drm_i915_private *i915) */ static void quirk_increase_t12_delay(struct drm_i915_private *i915) { - i915->quirks |= QUIRK_INCREASE_T12_DELAY; + intel_set_quirk(i915, QUIRK_INCREASE_T12_DELAY); drm_info(&i915->drm, "Applying T12 delay quirk\n"); } @@ -50,13 +55,13 @@ static void quirk_increase_t12_delay(struct drm_i915_private *i915) */ static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915) { - i915->quirks |= 
QUIRK_INCREASE_DDI_DISABLED_TIME; + intel_set_quirk(i915, QUIRK_INCREASE_DDI_DISABLED_TIME); drm_info(&i915->drm, "Applying Increase DDI Disabled quirk\n"); } static void quirk_no_pps_backlight_power_hook(struct drm_i915_private *i915) { - i915->quirks |= QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK; + intel_set_quirk(i915, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK); drm_info(&i915->drm, "Applying no pps backlight power quirk\n"); } @@ -191,6 +196,9 @@ static struct intel_quirk intel_quirks[] = { /* ASRock ITX*/ { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, + /* ECS Liva Q2 */ + { 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time }, + { 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time }, }; void intel_init_quirks(struct drm_i915_private *i915) @@ -213,3 +221,8 @@ void intel_init_quirks(struct drm_i915_private *i915) intel_dmi_quirks[i].hook(i915); } } + +bool intel_has_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk) +{ + return i915->display.quirks.mask & BIT(quirk); +} diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h index b0fcff142a56..10a4d163149f 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.h +++ b/drivers/gpu/drm/i915/display/intel_quirks.h @@ -6,8 +6,20 @@ #ifndef __INTEL_QUIRKS_H__ #define __INTEL_QUIRKS_H__ +#include <linux/types.h> + struct drm_i915_private; -void intel_init_quirks(struct drm_i915_private *dev_priv); +enum intel_quirk_id { + QUIRK_BACKLIGHT_PRESENT, + QUIRK_INCREASE_DDI_DISABLED_TIME, + QUIRK_INCREASE_T12_DELAY, + QUIRK_INVERT_BRIGHTNESS, + QUIRK_LVDS_SSC_DISABLE, + QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK, +}; + +void intel_init_quirks(struct drm_i915_private *i915); +bool intel_has_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk); #endif /* __INTEL_QUIRKS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 19122bc6d2ab..f5b744bef18f 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -2016,7 +2016,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector) return drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, - dev_priv->vbt.crt_ddc_pin)); + dev_priv->display.vbt.crt_ddc_pin)); } static enum drm_connector_status @@ -2581,9 +2581,9 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, struct sdvo_device_mapping *mapping; if (sdvo->port == PORT_B) - mapping = &dev_priv->vbt.sdvo_mappings[0]; + mapping = &dev_priv->display.vbt.sdvo_mappings[0]; else - mapping = &dev_priv->vbt.sdvo_mappings[1]; + mapping = &dev_priv->display.vbt.sdvo_mappings[1]; if (mapping->initialized) sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); @@ -2599,9 +2599,9 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, u8 pin; if (sdvo->port == PORT_B) - mapping = &dev_priv->vbt.sdvo_mappings[0]; + mapping = &dev_priv->display.vbt.sdvo_mappings[0]; else - mapping = &dev_priv->vbt.sdvo_mappings[1]; + mapping = &dev_priv->display.vbt.sdvo_mappings[1]; if (mapping->initialized && intel_gmbus_is_valid_pin(dev_priv, mapping->i2c_pin)) @@ -2639,11 +2639,11 @@ intel_sdvo_get_slave_addr(struct drm_i915_private *dev_priv, struct sdvo_device_mapping *my_mapping, *other_mapping; if (sdvo->port == PORT_B) { - my_mapping = &dev_priv->vbt.sdvo_mappings[0]; - other_mapping = &dev_priv->vbt.sdvo_mappings[1]; + my_mapping = &dev_priv->display.vbt.sdvo_mappings[0]; + other_mapping = 
&dev_priv->display.vbt.sdvo_mappings[1]; } else { - my_mapping = &dev_priv->vbt.sdvo_mappings[1]; - other_mapping = &dev_priv->vbt.sdvo_mappings[0]; + my_mapping = &dev_priv->display.vbt.sdvo_mappings[1]; + other_mapping = &dev_priv->display.vbt.sdvo_mappings[0]; } /* If the BIOS described our SDVO device, take advantage of it. */ diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index 0bdbedc67d7d..937cefd6f78f 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -518,6 +518,1086 @@ static const struct intel_mpllb_state dg2_hdmi_148_5 = { }; /* values in the table below are calculated using the algorithm */ +static const struct intel_mpllb_state dg2_hdmi_25200 = { + .clock = 25200, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 128) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 41943) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2621), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_27027 = { + .clock = 27027, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 140) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 31876) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 46555), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_28320 = { + .clock = 28320, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + 
REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 148) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 40894) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 30408), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_30240 = { + .clock = 30240, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 160) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 50331) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 42466), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_31500 = { + .clock = 31500, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 68) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_36000 = { + .clock = 36000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 82) | 
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_40000 = { + .clock = 40000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 96) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_49500 = { + .clock = 49500, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 1), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 126) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_50000 = { + .clock = 50000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 1), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 128) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) 
| + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_57284 = { + .clock = 57284, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 150) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 42886) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 49701), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_58000 = { + .clock = 58000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 152) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_65000 = { + .clock = 65000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 72) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0), 
+ .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_71000 = { + .clock = 71000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 80) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_74176 = { + .clock = 74176, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22334) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 43829), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_75000 = { + .clock = 75000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 88) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_78750 = { + .clock = 78750, + .ref_control = + 
REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_85500 = { + .clock = 85500, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 104) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_88750 = { + .clock = 88750, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 1), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 110) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_106500 = { + .clock = 106500, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + 
REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 138) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_108000 = { + .clock = 108000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 140) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_115500 = { + .clock = 115500, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 152) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_119000 = { + .clock = 119000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + 
REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 158) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_135000 = { + .clock = 135000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 76) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_138500 = { + .clock = 138500, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 78) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_147160 = { + .clock = 147160, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + 
REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 84) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 56623) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 6815), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_148352 = { + .clock = 148352, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22334) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 43829), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_154000 = { + .clock = 154000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 13) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 90) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_162000 = { + .clock = 162000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + 
REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 96) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_209800 = { + .clock = 209800, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 134) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 60293) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 7864), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_262750 = { + .clock = 262750, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 72) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36044) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_268500 = { + .clock = 268500, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + 
REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 45875) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_296703 = { + .clock = 296703, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22321) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36804), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_241500 = { + .clock = 241500, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 160) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_497750 = { + .clock = 497750, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 166) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), 
+ .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36044) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_592000 = { + .clock = 592000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_593407 = { + .clock = 593407, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22328) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 7549), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + static const struct intel_mpllb_state dg2_hdmi_297 = { .clock = 297000, .ref_control = @@ -584,6 +1664,42 @@ static const struct intel_mpllb_state * const dg2_hdmi_tables[] = { &dg2_hdmi_148_5, &dg2_hdmi_297, &dg2_hdmi_594, + &dg2_hdmi_25200, + &dg2_hdmi_27027, + &dg2_hdmi_28320, + &dg2_hdmi_30240, + &dg2_hdmi_31500, + &dg2_hdmi_36000, + &dg2_hdmi_40000, + &dg2_hdmi_49500, + &dg2_hdmi_50000, + &dg2_hdmi_57284, + &dg2_hdmi_58000, + &dg2_hdmi_65000, + &dg2_hdmi_71000, + &dg2_hdmi_74176, + &dg2_hdmi_75000, + &dg2_hdmi_78750, + &dg2_hdmi_85500, + &dg2_hdmi_88750, + &dg2_hdmi_106500, + &dg2_hdmi_108000, + &dg2_hdmi_115500, + &dg2_hdmi_119000, + &dg2_hdmi_135000, + &dg2_hdmi_138500, + &dg2_hdmi_147160, + &dg2_hdmi_148352, + &dg2_hdmi_154000, + &dg2_hdmi_162000, + &dg2_hdmi_209800, + &dg2_hdmi_241500, + &dg2_hdmi_262750, + &dg2_hdmi_268500, + &dg2_hdmi_296703, + &dg2_hdmi_497750, + &dg2_hdmi_592000, + &dg2_hdmi_593407, NULL, }; diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 6773840f6cc7..e5af955b5600 100644 --- 
a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -246,7 +246,7 @@ static u32 icl_tc_port_live_status_mask(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_uncore *uncore = &i915->uncore; - u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin]; + u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin]; u32 mask = 0; u32 val; @@ -279,7 +279,7 @@ static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); - u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin]; + u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin]; struct intel_uncore *uncore = &i915->uncore; u32 val, mask = 0; diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index 9379f3463344..dcf89d701f0f 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -39,6 +39,7 @@ #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_dpll.h" #include "intel_hotplug.h" #include "intel_tv.h" @@ -982,10 +983,10 @@ intel_tv_mode_vdisplay(const struct tv_mode *tv_mode) static void intel_tv_mode_to_mode(struct drm_display_mode *mode, - const struct tv_mode *tv_mode) + const struct tv_mode *tv_mode, + int clock) { - mode->clock = tv_mode->clock / - (tv_mode->oversample >> !tv_mode->progressive); + mode->clock = clock / (tv_mode->oversample >> !tv_mode->progressive); /* * tv_mode horizontal timings: @@ -1143,7 +1144,7 @@ intel_tv_get_config(struct intel_encoder *encoder, xsize = tmp >> 16; ysize = tmp & 0xffff; - intel_tv_mode_to_mode(&mode, &tv_mode); + intel_tv_mode_to_mode(&mode, &tv_mode, pipe_config->port_clock); drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&mode)); @@ -1184,6 +1185,9 @@ intel_tv_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { + struct intel_atomic_state *state = + to_intel_atomic_state(pipe_config->uapi.state); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_tv_connector_state *tv_conn_state = to_intel_tv_connector_state(conn_state); @@ -1192,6 +1196,7 @@ intel_tv_compute_config(struct intel_encoder *encoder, &pipe_config->hw.adjusted_mode; int hdisplay = adjusted_mode->crtc_hdisplay; int vdisplay = adjusted_mode->crtc_vdisplay; + int ret; if (!tv_mode) return -EINVAL; @@ -1206,7 +1211,13 @@ intel_tv_compute_config(struct intel_encoder *encoder, pipe_config->port_clock = tv_mode->clock; - intel_tv_mode_to_mode(adjusted_mode, tv_mode); + ret = intel_dpll_crtc_compute_clock(state, crtc); + if (ret) + return ret; + + pipe_config->clock_set = true; + + intel_tv_mode_to_mode(adjusted_mode, tv_mode, pipe_config->port_clock); drm_mode_set_crtcinfo(adjusted_mode, 0); if (intel_tv_source_too_wide(dev_priv, hdisplay) || @@ -1804,7 +1815,7 @@ intel_tv_get_modes(struct drm_connector *connector) * about the actual timings of the mode. We * do ignore the margins though. 
*/ - intel_tv_mode_to_mode(mode, tv_mode); + intel_tv_mode_to_mode(mode, tv_mode, tv_mode->clock); if (count == 0) { drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index 509b0a419c20..a9f44abfc9fc 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -76,6 +76,20 @@ struct bdb_header { } __packed; /* + * BDB version number dependencies are documented as: + * + * <start>+ + * indicates the field was introduced in version <start> + * and is still valid + * + * <start>-<end> + * indicates the field was introduced in version <start> + * and obsoleted in version <end>+1. + * + * ??? indicates the specific version number is unknown + */ + +/* * There are several types of BIOS data blocks (BDBs), each block has * an ID and size in the first 3 bytes (ID in first, size in next 2). * Known types are listed below. @@ -144,12 +158,12 @@ struct bdb_general_features { /* bits 3 */ u8 disable_smooth_vision:1; u8 single_dvi:1; - u8 rotate_180:1; /* 181 */ + u8 rotate_180:1; /* 181+ */ u8 fdi_rx_polarity_inverted:1; - u8 vbios_extended_mode:1; /* 160 */ - u8 copy_ilfp_dtd_to_sdvo_lvds_dtd:1; /* 160 */ - u8 panel_best_fit_timing:1; /* 160 */ - u8 ignore_strap_state:1; /* 160 */ + u8 vbios_extended_mode:1; /* 160+ */ + u8 copy_ilfp_dtd_to_sdvo_lvds_dtd:1; /* 160+ */ + u8 panel_best_fit_timing:1; /* 160+ */ + u8 ignore_strap_state:1; /* 160+ */ /* bits 4 */ u8 legacy_monitor_detect; @@ -164,11 +178,11 @@ struct bdb_general_features { u8 rsvd11:2; /* finish byte */ /* bits 6 */ - u8 tc_hpd_retry_timeout:7; /* 242 */ + u8 tc_hpd_retry_timeout:7; /* 242+ */ u8 rsvd12:1; /* bits 7 */ - u8 afc_startup_config:2;/* 249 */ + u8 afc_startup_config:2; /* 249+ */ u8 rsvd13:6; } __packed; @@ -183,6 +197,15 @@ struct bdb_general_features { #define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */ /* Device handle */ +#define DEVICE_HANDLE_CRT 0x0001 +#define DEVICE_HANDLE_EFP1 0x0004 +#define DEVICE_HANDLE_EFP2 0x0040 +#define DEVICE_HANDLE_EFP3 0x0020 +#define DEVICE_HANDLE_EFP4 0x0010 /* 194+ */ +#define DEVICE_HANDLE_EFP5 0x0002 /* 215+ */ +#define DEVICE_HANDLE_EFP6 0x0001 /* 217+ */ +#define DEVICE_HANDLE_EFP7 0x0100 /* 217+ */ +#define DEVICE_HANDLE_EFP8 0x0200 /* 217+ */ #define DEVICE_HANDLE_LFP1 0x0008 #define DEVICE_HANDLE_LFP2 0x0080 @@ -275,27 +298,27 @@ struct bdb_general_features { #define DVO_PORT_DPC 8 #define DVO_PORT_DPD 9 #define DVO_PORT_DPA 10 -#define DVO_PORT_DPE 11 /* 193 */ -#define DVO_PORT_HDMIE 12 /* 193 */ +#define DVO_PORT_DPE 11 /* 193+ */ +#define DVO_PORT_HDMIE 12 /* 193+ */ #define DVO_PORT_DPF 13 /* N/A */ #define DVO_PORT_HDMIF 14 /* N/A */ -#define DVO_PORT_DPG 15 /* 217 */ -#define DVO_PORT_HDMIG 16 /* 217 */ -#define DVO_PORT_DPH 17 /* 217 */ -#define DVO_PORT_HDMIH 18 /* 217 */ -#define DVO_PORT_DPI 19 /* 217 */ -#define DVO_PORT_HDMII 20 /* 217 */ -#define DVO_PORT_MIPIA 21 /* 171 */ -#define DVO_PORT_MIPIB 22 /* 171 */ -#define DVO_PORT_MIPIC 23 /* 171 */ -#define DVO_PORT_MIPID 24 /* 171 */ - -#define HDMI_MAX_DATA_RATE_PLATFORM 0 /* 204 */ -#define HDMI_MAX_DATA_RATE_297 1 /* 204 */ -#define HDMI_MAX_DATA_RATE_165 2 /* 204 */ -#define HDMI_MAX_DATA_RATE_594 3 /* 249 */ -#define HDMI_MAX_DATA_RATE_340 4 /* 249 */ -#define HDMI_MAX_DATA_RATE_300 5 /* 249 */ +#define DVO_PORT_DPG 15 /* 217+ */ +#define DVO_PORT_HDMIG 16 /* 217+ */ +#define DVO_PORT_DPH 17 /* 217+ */ +#define 
DVO_PORT_HDMIH 18 /* 217+ */ +#define DVO_PORT_DPI 19 /* 217+ */ +#define DVO_PORT_HDMII 20 /* 217+ */ +#define DVO_PORT_MIPIA 21 /* 171+ */ +#define DVO_PORT_MIPIB 22 /* 171+ */ +#define DVO_PORT_MIPIC 23 /* 171+ */ +#define DVO_PORT_MIPID 24 /* 171+ */ + +#define HDMI_MAX_DATA_RATE_PLATFORM 0 /* 204+ */ +#define HDMI_MAX_DATA_RATE_297 1 /* 204+ */ +#define HDMI_MAX_DATA_RATE_165 2 /* 204+ */ +#define HDMI_MAX_DATA_RATE_594 3 /* 249+ */ +#define HDMI_MAX_DATA_RATE_340 4 /* 249+ */ +#define HDMI_MAX_DATA_RATE_300 5 /* 249+ */ #define LEGACY_CHILD_DEVICE_CONFIG_SIZE 33 @@ -362,10 +385,10 @@ enum vbt_gmbus_ddi { * basically any of the fields to ensure the correct interpretation for the BDB * version in question. * - * When we copy the child device configs to dev_priv->vbt.child_dev, we reserve - * space for the full structure below, and initialize the tail not actually - * present in VBT to zeros. Accessing those fields is fine, as long as the - * default zero is taken into account, again according to the BDB version. + * When we copy the child device configs to dev_priv->display.vbt.child_dev, we + * reserve space for the full structure below, and initialize the tail not + * actually present in VBT to zeros. Accessing those fields is fine, as long as + * the default zero is taken into account, again according to the BDB version. * * BDB versions 155 and below are considered legacy, and version 155 seems to be * a baseline for some of the VBT documentation. When adding new fields, please @@ -379,20 +402,30 @@ struct child_device_config { u8 device_id[10]; /* ascii string */ struct { u8 i2c_speed; - u8 dp_onboard_redriver; /* 158 */ - u8 dp_ondock_redriver; /* 158 */ - u8 hdmi_level_shifter_value:5; /* 169 */ - u8 hdmi_max_data_rate:3; /* 204 */ - u16 dtd_buf_ptr; /* 161 */ - u8 edidless_efp:1; /* 161 */ - u8 compression_enable:1; /* 198 */ - u8 compression_method_cps:1; /* 198 */ - u8 ganged_edp:1; /* 202 */ - u8 reserved0:4; - u8 compression_structure_index:4; /* 198 */ - u8 reserved1:4; - u8 slave_port; /* 202 */ - u8 reserved2; + u8 dp_onboard_redriver_preemph:3; /* 158+ */ + u8 dp_onboard_redriver_vswing:3; /* 158+ */ + u8 dp_onboard_redriver_present:1; /* 158+ */ + u8 reserved0:1; + u8 dp_ondock_redriver_preemph:3; /* 158+ */ + u8 dp_ondock_redriver_vswing:3; /* 158+ */ + u8 dp_ondock_redriver_present:1; /* 158+ */ + u8 reserved1:1; + u8 hdmi_level_shifter_value:5; /* 158+ */ + u8 hdmi_max_data_rate:3; /* 204+ */ + u16 dtd_buf_ptr; /* 161+ */ + u8 edidless_efp:1; /* 161+ */ + u8 compression_enable:1; /* 198+ */ + u8 compression_method_cps:1; /* 198+ */ + u8 ganged_edp:1; /* 202+ */ + u8 lttpr_non_transparent:1; /* 235+ */ + u8 disable_compression_for_ext_disp:1; /* 251+ */ + u8 reserved2:2; + u8 compression_structure_index:4; /* 198+ */ + u8 reserved3:4; + u8 hdmi_max_frl_rate:4; /* 237+ */ + u8 hdmi_max_frl_rate_valid:1; /* 237+ */ + u8 reserved4:3; /* 237+ */ + u8 reserved5; } __packed; } __packed; @@ -412,16 +445,16 @@ struct child_device_config { u8 ddc2_pin; } __packed; struct { - u8 efp_routed:1; /* 158 */ - u8 lane_reversal:1; /* 184 */ - u8 lspcon:1; /* 192 */ - u8 iboost:1; /* 196 */ - u8 hpd_invert:1; /* 196 */ - u8 use_vbt_vswing:1; /* 218 */ - u8 flag_reserved:2; - u8 hdmi_support:1; /* 158 */ - u8 dp_support:1; /* 158 */ - u8 tmds_support:1; /* 158 */ + u8 efp_routed:1; /* 158+ */ + u8 lane_reversal:1; /* 184+ */ + u8 lspcon:1; /* 192+ */ + u8 iboost:1; /* 196+ */ + u8 hpd_invert:1; /* 196+ */ + u8 use_vbt_vswing:1; /* 218+ */ + u8 dp_max_lane_count:2; /* 244+ */ + u8 
hdmi_support:1; /* 158+ */ + u8 dp_support:1; /* 158+ */ + u8 tmds_support:1; /* 158+ */ u8 support_reserved:5; u8 aux_channel; u8 dongle_detect; @@ -429,7 +462,7 @@ struct child_device_config { } __packed; u8 pipe_cap:2; - u8 sdvo_stall:1; /* 158 */ + u8 sdvo_stall:1; /* 158+ */ u8 hpd_status:2; u8 integrated_encoder:1; u8 capabilities_reserved:2; @@ -437,21 +470,21 @@ struct child_device_config { union { u8 dvo2_wiring; - u8 mipi_bridge_type; /* 171 */ + u8 mipi_bridge_type; /* 171+ */ } __packed; u16 extended_type; u8 dvo_function; - u8 dp_usb_type_c:1; /* 195 */ - u8 tbt:1; /* 209 */ - u8 flags2_reserved:2; /* 195 */ - u8 dp_port_trace_length:4; /* 209 */ - u8 dp_gpio_index; /* 195 */ - u16 dp_gpio_pin_num; /* 195 */ - u8 dp_iboost_level:4; /* 196 */ - u8 hdmi_iboost_level:4; /* 196 */ - u8 dp_max_link_rate:3; /* 216/230 GLK+ */ - u8 dp_max_link_rate_reserved:5; /* 216/230 */ + u8 dp_usb_type_c:1; /* 195+ */ + u8 tbt:1; /* 209+ */ + u8 flags2_reserved:2; /* 195+ */ + u8 dp_port_trace_length:4; /* 209+ */ + u8 dp_gpio_index; /* 195+ */ + u16 dp_gpio_pin_num; /* 195+ */ + u8 dp_iboost_level:4; /* 196+ */ + u8 hdmi_iboost_level:4; /* 196+ */ + u8 dp_max_link_rate:3; /* 216+ */ + u8 dp_max_link_rate_reserved:5; /* 216+ */ } __packed; struct bdb_general_definitions { @@ -459,7 +492,7 @@ struct bdb_general_definitions { u8 crt_ddc_gmbus_pin; /* DPMS bits */ - u8 dpms_acpi:1; + u8 dpms_non_acpi:1; u8 skip_boot_crt_detect:1; u8 dpms_aim:1; u8 rsvd1:5; /* finish byte */ @@ -488,25 +521,25 @@ struct bdb_general_definitions { struct psr_table { /* Feature bits */ - u8 full_link:1; - u8 require_aux_to_wakeup:1; + u8 full_link:1; /* 165+ */ + u8 require_aux_to_wakeup:1; /* 165+ */ u8 feature_bits_rsvd:6; /* Wait times */ - u8 idle_frames:4; - u8 lines_to_wait:3; + u8 idle_frames:4; /* 165+ */ + u8 lines_to_wait:3; /* 165+ */ u8 wait_times_rsvd:1; /* TP wake up time in multiple of 100 */ - u16 tp1_wakeup_time; - u16 tp2_tp3_wakeup_time; + u16 tp1_wakeup_time; /* 165+ */ + u16 tp2_tp3_wakeup_time; /* 165+ */ } __packed; struct bdb_psr { struct psr_table psr_table[16]; /* PSR2 TP2/TP3 wakeup time for 16 panels */ - u32 psr2_tp2_tp3_wakeup_time; + u32 psr2_tp2_tp3_wakeup_time; /* 226+ */ } __packed; /* @@ -519,9 +552,10 @@ struct bdb_psr { #define BDB_DRIVER_FEATURE_INT_SDVO_LVDS 3 struct bdb_driver_features { + /* Driver bits */ u8 boot_dev_algorithm:1; - u8 block_display_switch:1; - u8 allow_display_switch:1; + u8 allow_display_switch_dvd:1; + u8 allow_display_switch_dos:1; u8 hotplug_dvo:1; u8 dual_view_zoom:1; u8 int15h_hook:1; @@ -533,6 +567,7 @@ struct bdb_driver_features { u8 boot_mode_bpp; u8 boot_mode_refresh; + /* Extended Driver Bits 1 */ u16 enable_lfp_primary:1; u16 selective_mode_pruning:1; u16 dual_frequency:1; @@ -548,29 +583,40 @@ struct bdb_driver_features { u16 tv_hotplug:1; u16 hdmi_config:2; - u8 static_display:1; - u8 reserved2:7; + /* Driver Flags 1 */ + u8 static_display:1; /* 163+ */ + u8 embedded_platform:1; /* 163+ */ + u8 display_subsystem_enable:1; /* 163+ */ + u8 reserved0:5; + u16 legacy_crt_max_x; u16 legacy_crt_max_y; u8 legacy_crt_max_refresh; - u8 hdmi_termination; - u8 custom_vbt_version; - /* Driver features data block */ - u16 rmpm_enabled:1; - u16 s2ddt_enabled:1; - u16 dpst_enabled:1; - u16 bltclt_enabled:1; - u16 adb_enabled:1; - u16 drrs_enabled:1; - u16 grs_enabled:1; - u16 gpmt_enabled:1; - u16 tbt_enabled:1; - u16 psr_enabled:1; - u16 ips_enabled:1; - u16 reserved3:1; - u16 dmrrs_enabled:1; - u16 reserved4:2; + /* Extended Driver Bits 2 */ + u8 
hdmi_termination:1; + u8 cea861d_hdmi_support:1; + u8 self_refresh_enable:1; + u8 reserved1:5; + + u8 custom_vbt_version; /* 155+ */ + + /* Driver Feature Flags */ + u16 rmpm_enabled:1; /* 165+ */ + u16 s2ddt_enabled:1; /* 165+ */ + u16 dpst_enabled:1; /* 165-227 */ + u16 bltclt_enabled:1; /* 165+ */ + u16 adb_enabled:1; /* 165-227 */ + u16 drrs_enabled:1; /* 165-227 */ + u16 grs_enabled:1; /* 165+ */ + u16 gpmt_enabled:1; /* 165+ */ + u16 tbt_enabled:1; /* 165+ */ + u16 psr_enabled:1; /* 165-227 */ + u16 ips_enabled:1; /* 165+ */ + u16 dpfs_enabled:1; /* 165+ */ + u16 dmrrs_enabled:1; /* 174-227 */ + u16 adt_enabled:1; /* ???-228 */ + u16 hpd_wake:1; /* 201-240 */ u16 pc_feature_valid:1; } __packed; @@ -657,7 +703,7 @@ struct bdb_sdvo_panel_dtds { struct edp_fast_link_params { - u8 rate:4; + u8 rate:4; /* ???-223 */ u8 lanes:4; u8 preemphasis:4; u8 vswing:4; @@ -690,18 +736,18 @@ struct bdb_edp { u32 sdrrs_msa_timing_delay; /* ith bit indicates enabled/disabled for (i+1)th panel */ - u16 edp_s3d_feature; /* 162 */ - u16 edp_t3_optimization; /* 165 */ - u64 edp_vswing_preemph; /* 173 */ - u16 fast_link_training; /* 182 */ - u16 dpcd_600h_write_required; /* 185 */ - struct edp_pwm_delays pwm_delays[16]; /* 186 */ - u16 full_link_params_provided; /* 199 */ - struct edp_full_link_params full_link_params[16]; /* 199 */ - u16 apical_enable; /* 203 */ - struct edp_apical_params apical_params[16]; /* 203 */ - u16 edp_fast_link_training_rate[16]; /* 224 */ - u16 edp_max_port_link_rate[16]; /* 244 */ + u16 edp_s3d_feature; /* 162+ */ + u16 edp_t3_optimization; /* 165+ */ + u64 edp_vswing_preemph; /* 173+ */ + u16 fast_link_training; /* 182+ */ + u16 dpcd_600h_write_required; /* 185+ */ + struct edp_pwm_delays pwm_delays[16]; /* 186+ */ + u16 full_link_params_provided; /* 199+ */ + struct edp_full_link_params full_link_params[16]; /* 199+ */ + u16 apical_enable; /* 203+ */ + struct edp_apical_params apical_params[16]; /* 203+ */ + u16 edp_fast_link_training_rate[16]; /* 224+ */ + u16 edp_max_port_link_rate[16]; /* 244+ */ } __packed; /* @@ -710,14 +756,14 @@ struct bdb_edp { struct bdb_lvds_options { u8 panel_type; - u8 panel_type2; /* 212 */ + u8 panel_type2; /* 212+ */ /* LVDS capabilities, stored in a dword */ u8 pfit_mode:2; u8 pfit_text_mode_enhanced:1; u8 pfit_gfx_mode_enhanced:1; u8 pfit_ratio_auto:1; u8 pixel_dither:1; - u8 lvds_edid:1; + u8 lvds_edid:1; /* ???-240 */ u8 rsvd2:1; u8 rsvd4; /* LVDS Panel channel bits stored here */ @@ -731,11 +777,11 @@ struct bdb_lvds_options { /* LVDS panel type bits stored here */ u32 dps_panel_type_bits; /* LVDS backlight control type bits stored here */ - u32 blt_control_type_bits; + u32 blt_control_type_bits; /* ???-240 */ - u16 lcdvcc_s0_enable; /* 200 */ - u32 rotation; /* 228 */ - u32 position; /* 240 */ + u16 lcdvcc_s0_enable; /* 200+ */ + u32 rotation; /* 228+ */ + u32 position; /* 240+ */ } __packed; /* @@ -756,7 +802,7 @@ struct lvds_lfp_data_ptr { struct bdb_lvds_lfp_data_ptrs { u8 lvds_entries; struct lvds_lfp_data_ptr ptr[16]; - struct lvds_lfp_data_ptr_table panel_name; /* 156-163? */ + struct lvds_lfp_data_ptr_table panel_name; /* (156-163?)+ */ } __packed; /* @@ -808,20 +854,20 @@ struct lvds_lfp_panel_name { } __packed; struct lvds_lfp_black_border { - u8 top; /* 227 */ - u8 bottom; /* 227 */ - u8 left; /* 238 */ - u8 right; /* 238 */ + u8 top; /* 227+ */ + u8 bottom; /* 227+ */ + u8 left; /* 238+ */ + u8 right; /* 238+ */ } __packed; struct bdb_lvds_lfp_data_tail { - struct lvds_lfp_panel_name panel_name[16]; /* 156-163? 
*/ - u16 scaling_enable; /* 187 */ - u8 seamless_drrs_min_refresh_rate[16]; /* 188 */ - u8 pixel_overlap_count[16]; /* 208 */ - struct lvds_lfp_black_border black_border[16]; /* 227 */ - u16 dual_lfp_port_sync_enable; /* 231 */ - u16 gpu_dithering_for_banding_artifacts; /* 245 */ + struct lvds_lfp_panel_name panel_name[16]; /* (156-163?)+ */ + u16 scaling_enable; /* 187+ */ + u8 seamless_drrs_min_refresh_rate[16]; /* 188+ */ + u8 pixel_overlap_count[16]; /* 208+ */ + struct lvds_lfp_black_border black_border[16]; /* 227+ */ + u16 dual_lfp_port_sync_enable; /* 231+ */ + u16 gpu_dithering_for_banding_artifacts; /* 245+ */ } __packed; /* @@ -836,7 +882,7 @@ struct lfp_backlight_data_entry { u8 active_low_pwm:1; u8 obsolete1:5; u16 pwm_freq_hz; - u8 min_brightness; /* Obsolete from 234+ */ + u8 min_brightness; /* ???-233 */ u8 obsolete2; u8 obsolete3; } __packed; @@ -859,7 +905,7 @@ struct lfp_brightness_level { struct bdb_lfp_backlight_data { u8 entry_size; struct lfp_backlight_data_entry data[16]; - u8 level[16]; /* Obsolete from 234+ */ + u8 level[16]; /* ???-233 */ struct lfp_backlight_control_method backlight_control[16]; struct lfp_brightness_level brightness_level[16]; /* 234+ */ struct lfp_brightness_level brightness_min_level[16]; /* 234+ */ @@ -874,8 +920,8 @@ struct lfp_power_features { u8 reserved1:1; u8 power_conservation_pref:3; u8 reserved2:1; - u8 lace_enabled_status:1; - u8 lace_support:1; + u8 lace_enabled_status:1; /* 210+ */ + u8 lace_support:1; /* 210+ */ u8 als_enable:1; } __packed; @@ -895,24 +941,24 @@ struct aggressiveness_profile2_entry { } __packed; struct bdb_lfp_power { - struct lfp_power_features features; + struct lfp_power_features features; /* ???-227 */ struct als_data_entry als[5]; - u8 lace_aggressiveness_profile:3; + u8 lace_aggressiveness_profile:3; /* 210-227 */ u8 reserved1:5; - u16 dpst; - u16 psr; - u16 drrs; - u16 lace_support; - u16 adt; - u16 dmrrs; - u16 adb; - u16 lace_enabled_status; - struct aggressiveness_profile_entry aggressiveness[16]; - u16 hobl; /* 232+ */ - u16 vrr_feature_enabled; /* 233+ */ - u16 elp; /* 247+ */ - u16 opst; /* 247+ */ - struct aggressiveness_profile2_entry aggressiveness2[16]; /* 247+ */ + u16 dpst; /* 228+ */ + u16 psr; /* 228+ */ + u16 drrs; /* 228+ */ + u16 lace_support; /* 228+ */ + u16 adt; /* 228+ */ + u16 dmrrs; /* 228+ */ + u16 adb; /* 228+ */ + u16 lace_enabled_status; /* 228+ */ + struct aggressiveness_profile_entry aggressiveness[16]; /* 228+ */ + u16 hobl; /* 232+ */ + u16 vrr_feature_enabled; /* 233+ */ + u16 elp; /* 247+ */ + u16 opst; /* 247+ */ + struct aggressiveness_profile2_entry aggressiveness2[16]; /* 247+ */ } __packed; /* @@ -922,10 +968,10 @@ struct bdb_lfp_power { #define MAX_MIPI_CONFIGURATIONS 6 struct bdb_mipi_config { - struct mipi_config config[MAX_MIPI_CONFIGURATIONS]; /* 175 */ - struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS]; /* 177 */ - struct edp_pwm_delays pwm_delays[MAX_MIPI_CONFIGURATIONS]; /* 186 */ - u8 pmic_i2c_bus_number[MAX_MIPI_CONFIGURATIONS]; /* 190 */ + struct mipi_config config[MAX_MIPI_CONFIGURATIONS]; /* 175+ */ + struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS]; /* 177+ */ + struct edp_pwm_delays pwm_delays[MAX_MIPI_CONFIGURATIONS]; /* 186+ */ + u8 pmic_i2c_bus_number[MAX_MIPI_CONFIGURATIONS]; /* 190+ */ } __packed; /* diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index 43e1bbc1e303..269f9792390d 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -344,7 +344,7 
@@ bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state) struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - if (!INTEL_INFO(i915)->display.has_dsc) + if (!RUNTIME_INFO(i915)->has_dsc) return false; if (DISPLAY_VER(i915) >= 12) @@ -460,7 +460,6 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) u8 i = 0; vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay; - vdsc_cfg->pic_height = pipe_config->hw.adjusted_mode.crtc_vdisplay; vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width, pipe_config->dsc.slice_count); @@ -597,6 +596,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) DSC_VER_MIN_SHIFT | vdsc_cfg->bits_per_component << DSC_BPC_SHIFT | vdsc_cfg->line_buf_depth << DSC_LINE_BUF_DEPTH_SHIFT; + if (vdsc_cfg->dsc_version_minor == 2) + pps_val |= DSC_ALT_ICH_SEL; if (vdsc_cfg->block_pred_enable) pps_val |= DSC_BLOCK_PREDICTION; if (vdsc_cfg->convert_rgb) diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index 04250a0fec3c..5eac99021875 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -142,11 +142,16 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, * For XE_LPD+, we use guardband and pipeline override * is deprecated. */ - if (DISPLAY_VER(i915) >= 13) + if (DISPLAY_VER(i915) >= 13) { + /* + * FIXME: Subtract Window2 delay from below value. + * + * Window2 specifies time required to program DSB (Window2) in + * number of scan lines. Assuming 0 for no DSB. + */ crtc_state->vrr.guardband = - crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - - i915->window2_delay; - else + crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay; + } else { /* * FIXME: s/4/framestart_delay/ to get consistent * earliest/latest points for register latching regardless @@ -159,6 +164,7 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, */ crtc_state->vrr.pipeline_full = min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - 4 - 1); + } crtc_state->mode_flags |= I915_MODE_FLAG_VRR; } diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 4d6a27757065..7cb713043408 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -14,11 +14,11 @@ #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fbc.h" -#include "intel_pm.h" #include "intel_psr.h" #include "intel_sprite.h" #include "skl_scaler.h" #include "skl_universal_plane.h" +#include "skl_watermark.h" #include "pxp/intel_pxp.h" static const u32 skl_plane_formats[] = { @@ -1928,7 +1928,7 @@ static enum intel_fbc_id skl_fbc_id_for_pipe(enum pipe pipe) static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, enum intel_fbc_id fbc_id, enum plane_id plane_id) { - if ((INTEL_INFO(dev_priv)->display.fbc_mask & BIT(fbc_id)) == 0) + if ((RUNTIME_INFO(dev_priv)->fbc_mask & BIT(fbc_id)) == 0) return false; return plane_id == PLANE_PRIMARY; @@ -1940,7 +1940,7 @@ static struct intel_fbc *skl_plane_fbc(struct drm_i915_private *dev_priv, enum intel_fbc_id fbc_id = skl_fbc_id_for_pipe(pipe); if (skl_plane_has_fbc(dev_priv, fbc_id, plane_id)) - return dev_priv->fbc[fbc_id]; + return dev_priv->display.fbc[fbc_id]; else return NULL; } diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c 
new file mode 100644 index 000000000000..01b0932757ed --- /dev/null +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -0,0 +1,3562 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include <drm/drm_blend.h> + +#include "intel_atomic.h" +#include "intel_atomic_plane.h" +#include "intel_bw.h" +#include "intel_de.h" +#include "intel_display.h" +#include "intel_display_power.h" +#include "intel_display_types.h" +#include "intel_fb.h" +#include "skl_watermark.h" + +#include "i915_drv.h" +#include "i915_fixed.h" +#include "i915_reg.h" +#include "intel_pcode.h" +#include "intel_pm.h" + +static void skl_sagv_disable(struct drm_i915_private *i915); + +/* Stores plane specific WM parameters */ +struct skl_wm_params { + bool x_tiled, y_tiled; + bool rc_surface; + bool is_planar; + u32 width; + u8 cpp; + u32 plane_pixel_rate; + u32 y_min_scanlines; + u32 plane_bytes_per_line; + uint_fixed_16_16_t plane_blocks_per_line; + uint_fixed_16_16_t y_tile_minimum; + u32 linetime_us; + u32 dbuf_block_size; +}; + +u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915) +{ + u8 enabled_slices = 0; + enum dbuf_slice slice; + + for_each_dbuf_slice(i915, slice) { + if (intel_uncore_read(&i915->uncore, + DBUF_CTL_S(slice)) & DBUF_POWER_STATE) + enabled_slices |= BIT(slice); + } + + return enabled_slices; +} + +/* + * FIXME: We still don't have the proper code to detect if we need to apply the WA, + * so assume we'll always need it in order to avoid underruns. + */ +static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915) +{ + return DISPLAY_VER(i915) == 9; +} + +static bool +intel_has_sagv(struct drm_i915_private *i915) +{ + return DISPLAY_VER(i915) >= 9 && !IS_LP(i915) && + i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED; +} + +static u32 +intel_sagv_block_time(struct drm_i915_private *i915) +{ + if (DISPLAY_VER(i915) >= 14) { + u32 val; + + val = intel_uncore_read(&i915->uncore, MTL_LATENCY_SAGV); + + return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val); + } else if (DISPLAY_VER(i915) >= 12) { + u32 val = 0; + int ret; + + ret = snb_pcode_read(&i915->uncore, + GEN12_PCODE_READ_SAGV_BLOCK_TIME_US, + &val, NULL); + if (ret) { + drm_dbg_kms(&i915->drm, "Couldn't read SAGV block time!\n"); + return 0; + } + + return val; + } else if (DISPLAY_VER(i915) == 11) { + return 10; + } else if (DISPLAY_VER(i915) == 9 && !IS_LP(i915)) { + return 30; + } else { + return 0; + } +} + +static void intel_sagv_init(struct drm_i915_private *i915) +{ + if (!intel_has_sagv(i915)) + i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; + + /* + * Probe to see if we have working SAGV control. + * For icl+ this was already determined by intel_bw_init_hw(). + */ + if (DISPLAY_VER(i915) < 11) + skl_sagv_disable(i915); + + drm_WARN_ON(&i915->drm, i915->display.sagv.status == I915_SAGV_UNKNOWN); + + i915->display.sagv.block_time_us = intel_sagv_block_time(i915); + + drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n", + str_yes_no(intel_has_sagv(i915)), i915->display.sagv.block_time_us); + + /* avoid overflow when adding with wm0 latency/etc. */ + if (drm_WARN(&i915->drm, i915->display.sagv.block_time_us > U16_MAX, + "Excessive SAGV block time %u, ignoring\n", + i915->display.sagv.block_time_us)) + i915->display.sagv.block_time_us = 0; + + if (!intel_has_sagv(i915)) + i915->display.sagv.block_time_us = 0; +} + +/* + * SAGV dynamically adjusts the system agent voltage and clock frequencies + * depending on power and performance requirements.
The display engine access + * to system memory is blocked during the adjustment time. Because of the + * blocking time, having this enabled can cause full system hangs and/or pipe + * underruns if we don't meet all of the following requirements: + * + * - <= 1 pipe enabled + * - All planes can enable watermarks for latencies >= SAGV engine block time + * - We're not using an interlaced display configuration + */ +static void skl_sagv_enable(struct drm_i915_private *i915) +{ + int ret; + + if (!intel_has_sagv(i915)) + return; + + if (i915->display.sagv.status == I915_SAGV_ENABLED) + return; + + drm_dbg_kms(&i915->drm, "Enabling SAGV\n"); + ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL, + GEN9_SAGV_ENABLE); + + /* We don't need to wait for SAGV when enabling */ + + /* + * Some skl systems, pre-release machines in particular, + * don't actually have SAGV. + */ + if (IS_SKYLAKE(i915) && ret == -ENXIO) { + drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n"); + i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; + return; + } else if (ret < 0) { + drm_err(&i915->drm, "Failed to enable SAGV\n"); + return; + } + + i915->display.sagv.status = I915_SAGV_ENABLED; +} + +static void skl_sagv_disable(struct drm_i915_private *i915) +{ + int ret; + + if (!intel_has_sagv(i915)) + return; + + if (i915->display.sagv.status == I915_SAGV_DISABLED) + return; + + drm_dbg_kms(&i915->drm, "Disabling SAGV\n"); + /* bspec says to keep retrying for at least 1 ms */ + ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL, + GEN9_SAGV_DISABLE, + GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, + 1); + /* + * Some skl systems, pre-release machines in particular, + * don't actually have SAGV. + */ + if (IS_SKYLAKE(i915) && ret == -ENXIO) { + drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n"); + i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; + return; + } else if (ret < 0) { + drm_err(&i915->drm, "Failed to disable SAGV (%d)\n", ret); + return; + } + + i915->display.sagv.status = I915_SAGV_DISABLED; +} + +static void skl_sagv_pre_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_bw_state *new_bw_state = + intel_atomic_get_new_bw_state(state); + + if (!new_bw_state) + return; + + if (!intel_can_enable_sagv(i915, new_bw_state)) + skl_sagv_disable(i915); +} + +static void skl_sagv_post_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_bw_state *new_bw_state = + intel_atomic_get_new_bw_state(state); + + if (!new_bw_state) + return; + + if (intel_can_enable_sagv(i915, new_bw_state)) + skl_sagv_enable(i915); +} + +static void icl_sagv_pre_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_bw_state *old_bw_state = + intel_atomic_get_old_bw_state(state); + const struct intel_bw_state *new_bw_state = + intel_atomic_get_new_bw_state(state); + u16 old_mask, new_mask; + + if (!new_bw_state) + return; + + old_mask = old_bw_state->qgv_points_mask; + new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask; + + if (old_mask == new_mask) + return; + + WARN_ON(!new_bw_state->base.changed); + + drm_dbg_kms(&i915->drm, "Restricting QGV points: 0x%x -> 0x%x\n", + old_mask, new_mask); + + /* + * Restrict required qgv points before updating the configuration. + * According to BSpec we can't mask and unmask qgv points at the same + * time. 
Also masking should be done before updating the configuration + * and unmasking afterwards. + */ + icl_pcode_restrict_qgv_points(i915, new_mask); +} + +static void icl_sagv_post_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_bw_state *old_bw_state = + intel_atomic_get_old_bw_state(state); + const struct intel_bw_state *new_bw_state = + intel_atomic_get_new_bw_state(state); + u16 old_mask, new_mask; + + if (!new_bw_state) + return; + + old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask; + new_mask = new_bw_state->qgv_points_mask; + + if (old_mask == new_mask) + return; + + WARN_ON(!new_bw_state->base.changed); + + drm_dbg_kms(&i915->drm, "Relaxing QGV points: 0x%x -> 0x%x\n", + old_mask, new_mask); + + /* + * Allow required qgv points after updating the configuration. + * According to BSpec we can't mask and unmask qgv points at the same + * time. Also masking should be done before updating the configuration + * and unmasking afterwards. + */ + icl_pcode_restrict_qgv_points(i915, new_mask); +} + +void intel_sagv_pre_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + + /* + * Just return if we can't control SAGV or don't have it. + * This is different from the situation where we have SAGV but just + * can't afford it due to DBuf limitations - if SAGV is completely + * disabled in the BIOS, we are not even allowed to send a PCode + * request, as it will throw an error. So we have to check it here. + */ + if (!intel_has_sagv(i915)) + return; + + if (DISPLAY_VER(i915) >= 11) + icl_sagv_pre_plane_update(state); + else + skl_sagv_pre_plane_update(state); +} + +void intel_sagv_post_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + + /* + * Just return if we can't control SAGV or don't have it. + * This is different from the situation where we have SAGV but just + * can't afford it due to DBuf limitations - if SAGV is completely + * disabled in the BIOS, we are not even allowed to send a PCode + * request, as it will throw an error. So we have to check it here. + */ + if (!intel_has_sagv(i915)) + return; + + if (DISPLAY_VER(i915) >= 11) + icl_sagv_post_plane_update(state); + else + skl_sagv_post_plane_update(state); +} + +static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + enum plane_id plane_id; + int max_level = INT_MAX; + + if (!intel_has_sagv(i915)) + return false; + + if (!crtc_state->hw.active) + return true; + + if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE) + return false; + + for_each_plane_id_on_crtc(crtc, plane_id) { + const struct skl_plane_wm *wm = + &crtc_state->wm.skl.optimal.planes[plane_id]; + int level; + + /* Skip this plane if it's not enabled */ + if (!wm->wm[0].enable) + continue; + + /* Find the highest enabled wm level for this plane */ + for (level = ilk_wm_max_level(i915); + !wm->wm[level].enable; --level) + { } + + /* Highest common enabled wm level for all planes */ + max_level = min(level, max_level); + } + + /* No enabled planes?
*/ + if (max_level == INT_MAX) + return true; + + for_each_plane_id_on_crtc(crtc, plane_id) { + const struct skl_plane_wm *wm = + &crtc_state->wm.skl.optimal.planes[plane_id]; + + /* + * All enabled planes must have enabled a common wm level that + * can tolerate memory latencies higher than sagv_block_time_us + */ + if (wm->wm[0].enable && !wm->wm[max_level].can_sagv) + return false; + } + + return true; +} + +static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + enum plane_id plane_id; + + if (!crtc_state->hw.active) + return true; + + for_each_plane_id_on_crtc(crtc, plane_id) { + const struct skl_plane_wm *wm = + &crtc_state->wm.skl.optimal.planes[plane_id]; + + if (wm->wm[0].enable && !wm->sagv.wm0.enable) + return false; + } + + return true; +} + +static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + + if (DISPLAY_VER(i915) >= 12) + return tgl_crtc_can_enable_sagv(crtc_state); + else + return skl_crtc_can_enable_sagv(crtc_state); +} + +bool intel_can_enable_sagv(struct drm_i915_private *i915, + const struct intel_bw_state *bw_state) +{ + if (DISPLAY_VER(i915) < 11 && + bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes)) + return false; + + return bw_state->pipe_sagv_reject == 0; +} + +static int intel_compute_sagv_mask(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + int ret; + struct intel_crtc *crtc; + struct intel_crtc_state *new_crtc_state; + struct intel_bw_state *new_bw_state = NULL; + const struct intel_bw_state *old_bw_state = NULL; + int i; + + for_each_new_intel_crtc_in_state(state, crtc, + new_crtc_state, i) { + new_bw_state = intel_atomic_get_bw_state(state); + if (IS_ERR(new_bw_state)) + return PTR_ERR(new_bw_state); + + old_bw_state = intel_atomic_get_old_bw_state(state); + + if (intel_crtc_can_enable_sagv(new_crtc_state)) + new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe); + else + new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe); + } + + if (!new_bw_state) + return 0; + + new_bw_state->active_pipes = + intel_calc_active_pipes(state, old_bw_state->active_pipes); + + if (new_bw_state->active_pipes != old_bw_state->active_pipes) { + ret = intel_atomic_lock_global_state(&new_bw_state->base); + if (ret) + return ret; + } + + if (intel_can_enable_sagv(i915, new_bw_state) != + intel_can_enable_sagv(i915, old_bw_state)) { + ret = intel_atomic_serialize_global_state(&new_bw_state->base); + if (ret) + return ret; + } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { + ret = intel_atomic_lock_global_state(&new_bw_state->base); + if (ret) + return ret; + } + + for_each_new_intel_crtc_in_state(state, crtc, + new_crtc_state, i) { + struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal; + + /* + * We store use_sagv_wm in the crtc state rather than relying on + * that bw state since we have no convenient way to get at the + * latter from the plane commit hooks (especially in the legacy + * cursor case) + */ + pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) && + DISPLAY_VER(i915) >= 12 && + intel_can_enable_sagv(i915, new_bw_state); + } + + return 0; +} + +static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry, + u16 start, u16 end) +{ + entry->start = start; + entry->end = end; + + return end; +} + +static int 
intel_dbuf_slice_size(struct drm_i915_private *i915) +{ + return INTEL_INFO(i915)->display.dbuf.size / + hweight8(INTEL_INFO(i915)->display.dbuf.slice_mask); +} + +static void +skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask, + struct skl_ddb_entry *ddb) +{ + int slice_size = intel_dbuf_slice_size(i915); + + if (!slice_mask) { + ddb->start = 0; + ddb->end = 0; + return; + } + + ddb->start = (ffs(slice_mask) - 1) * slice_size; + ddb->end = fls(slice_mask) * slice_size; + + WARN_ON(ddb->start >= ddb->end); + WARN_ON(ddb->end > INTEL_INFO(i915)->display.dbuf.size); +} + +static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask) +{ + struct skl_ddb_entry ddb; + + if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2))) + slice_mask = BIT(DBUF_S1); + else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4))) + slice_mask = BIT(DBUF_S3); + + skl_ddb_entry_for_slices(i915, slice_mask, &ddb); + + return ddb.start; +} + +u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915, + const struct skl_ddb_entry *entry) +{ + int slice_size = intel_dbuf_slice_size(i915); + enum dbuf_slice start_slice, end_slice; + u8 slice_mask = 0; + + if (!skl_ddb_entry_size(entry)) + return 0; + + start_slice = entry->start / slice_size; + end_slice = (entry->end - 1) / slice_size; + + /* + * A per-plane DDB entry can in the worst case span multiple slices, + * but a single entry is always contiguous. + */ + while (start_slice <= end_slice) { + slice_mask |= BIT(start_slice); + start_slice++; + } + + return slice_mask; +} + +static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state) +{ + const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; + int hdisplay, vdisplay; + + if (!crtc_state->hw.active) + return 0; + + /* + * The watermark/DDB requirement depends heavily on the width of the + * framebuffer, so instead of allocating DDB equally among pipes, + * distribute DDB based on the resolution/width of the display. + */ + drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay); + + return hdisplay; +} + +static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state, + enum pipe for_pipe, + unsigned int *weight_start, + unsigned int *weight_end, + unsigned int *weight_total) +{ + struct drm_i915_private *i915 = + to_i915(dbuf_state->base.state->base.dev); + enum pipe pipe; + + *weight_start = 0; + *weight_end = 0; + *weight_total = 0; + + for_each_pipe(i915, pipe) { + int weight = dbuf_state->weight[pipe]; + + /* + * Do not account for pipes using other slice sets: + * luckily, as of the current BSpec, slice sets do not partially + * intersect (pipes share either the same single slice or the + * same slice set, i.e. no partial intersection), so it is + * enough to check for equality for now.
+ */ + if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe]) + continue; + + *weight_total += weight; + if (pipe < for_pipe) { + *weight_start += weight; + *weight_end += weight; + } else if (pipe == for_pipe) { + *weight_end += weight; + } + } +} + +static int +skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + unsigned int weight_total, weight_start, weight_end; + const struct intel_dbuf_state *old_dbuf_state = + intel_atomic_get_old_dbuf_state(state); + struct intel_dbuf_state *new_dbuf_state = + intel_atomic_get_new_dbuf_state(state); + struct intel_crtc_state *crtc_state; + struct skl_ddb_entry ddb_slices; + enum pipe pipe = crtc->pipe; + unsigned int mbus_offset = 0; + u32 ddb_range_size; + u32 dbuf_slice_mask; + u32 start, end; + int ret; + + if (new_dbuf_state->weight[pipe] == 0) { + skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0); + goto out; + } + + dbuf_slice_mask = new_dbuf_state->slices[pipe]; + + skl_ddb_entry_for_slices(i915, dbuf_slice_mask, &ddb_slices); + mbus_offset = mbus_ddb_offset(i915, dbuf_slice_mask); + ddb_range_size = skl_ddb_entry_size(&ddb_slices); + + intel_crtc_dbuf_weights(new_dbuf_state, pipe, + &weight_start, &weight_end, &weight_total); + + start = ddb_range_size * weight_start / weight_total; + end = ddb_range_size * weight_end / weight_total; + + skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], + ddb_slices.start - mbus_offset + start, + ddb_slices.start - mbus_offset + end); + +out: + if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] && + skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe], + &new_dbuf_state->ddb[pipe])) + return 0; + + ret = intel_atomic_lock_global_state(&new_dbuf_state->base); + if (ret) + return ret; + + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + /* + * Used for checking overlaps, so we need absolute + * offsets instead of MBUS relative offsets. 
+ */ + crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start; + crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end; + + drm_dbg_kms(&i915->drm, + "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n", + crtc->base.base.id, crtc->base.name, + old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe], + old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end, + new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end, + old_dbuf_state->active_pipes, new_dbuf_state->active_pipes); + + return 0; +} + +static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state, + int width, const struct drm_format_info *format, + u64 modifier, unsigned int rotation, + u32 plane_pixel_rate, struct skl_wm_params *wp, + int color_plane); + +static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, + struct intel_plane *plane, + int level, + unsigned int latency, + const struct skl_wm_params *wp, + const struct skl_wm_level *result_prev, + struct skl_wm_level *result /* out */); + +static unsigned int +skl_cursor_allocation(const struct intel_crtc_state *crtc_state, + int num_active) +{ + struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor); + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + int level, max_level = ilk_wm_max_level(i915); + struct skl_wm_level wm = {}; + int ret, min_ddb_alloc = 0; + struct skl_wm_params wp; + + ret = skl_compute_wm_params(crtc_state, 256, + drm_format_info(DRM_FORMAT_ARGB8888), + DRM_FORMAT_MOD_LINEAR, + DRM_MODE_ROTATE_0, + crtc_state->pixel_rate, &wp, 0); + drm_WARN_ON(&i915->drm, ret); + + for (level = 0; level <= max_level; level++) { + unsigned int latency = i915->display.wm.skl_latency[level]; + + skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm); + if (wm.min_ddb_alloc == U16_MAX) + break; + + min_ddb_alloc = wm.min_ddb_alloc; + } + + return max(num_active == 1 ? 
32 : 8, min_ddb_alloc); +} + +static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg) +{ + skl_ddb_entry_init(entry, + REG_FIELD_GET(PLANE_BUF_START_MASK, reg), + REG_FIELD_GET(PLANE_BUF_END_MASK, reg)); + if (entry->end) + entry->end++; +} + +static void +skl_ddb_get_hw_plane_state(struct drm_i915_private *i915, + const enum pipe pipe, + const enum plane_id plane_id, + struct skl_ddb_entry *ddb, + struct skl_ddb_entry *ddb_y) +{ + u32 val; + + /* Cursor doesn't support NV12/planar, so no extra calculation needed */ + if (plane_id == PLANE_CURSOR) { + val = intel_uncore_read(&i915->uncore, CUR_BUF_CFG(pipe)); + skl_ddb_entry_init_from_hw(ddb, val); + return; + } + + val = intel_uncore_read(&i915->uncore, PLANE_BUF_CFG(pipe, plane_id)); + skl_ddb_entry_init_from_hw(ddb, val); + + if (DISPLAY_VER(i915) >= 11) + return; + + val = intel_uncore_read(&i915->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id)); + skl_ddb_entry_init_from_hw(ddb_y, val); +} + +static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, + struct skl_ddb_entry *ddb, + struct skl_ddb_entry *ddb_y) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + enum intel_display_power_domain power_domain; + enum pipe pipe = crtc->pipe; + intel_wakeref_t wakeref; + enum plane_id plane_id; + + power_domain = POWER_DOMAIN_PIPE(pipe); + wakeref = intel_display_power_get_if_enabled(i915, power_domain); + if (!wakeref) + return; + + for_each_plane_id_on_crtc(crtc, plane_id) + skl_ddb_get_hw_plane_state(i915, pipe, + plane_id, + &ddb[plane_id], + &ddb_y[plane_id]); + + intel_display_power_put(i915, power_domain, wakeref); +} + +struct dbuf_slice_conf_entry { + u8 active_pipes; + u8 dbuf_mask[I915_MAX_PIPES]; + bool join_mbus; +}; + +/* + * Table taken from Bspec 12716. + * Pipes have some preferred DBuf slice affinity, + * plus there are some hardcoded requirements on how + * those should be distributed for multipipe scenarios. + * With more DBuf slices the algorithm can get even more messy + * and less readable, so it was decided to use a table almost + * as-is from BSpec itself - that way it is at least easier + * to compare, change and check. + */ +static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] = +/* Autogenerated with igt/tools/intel_dbuf_map tool: */ +{ + { + .active_pipes = BIT(PIPE_A), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + }, + }, + { + .active_pipes = BIT(PIPE_B), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_C), + .dbuf_mask = { + [PIPE_C] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + }, + }, + {} +}; + +/* + * Table taken from Bspec 49255. + * Pipes have some preferred DBuf slice affinity, + * plus there are some hardcoded requirements on how + * those should be distributed for multipipe scenarios. + * With more DBuf slices the algorithm can get even more messy + * and less readable, so it was decided to use a table almost + * as-is from BSpec itself - that way it is at least easier + * to compare, change and check.
+ */ +static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] = +/* Autogenerated with igt/tools/intel_dbuf_map tool: */ +{ + { + .active_pipes = BIT(PIPE_A), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_B), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S2), + [PIPE_B] = BIT(DBUF_S1), + }, + }, + { + .active_pipes = BIT(PIPE_C), + .dbuf_mask = { + [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_D), + .dbuf_mask = { + [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_D] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_B) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1), + [PIPE_D] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S1), + [PIPE_D] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_C] = BIT(DBUF_S1), + [PIPE_D] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + [PIPE_D] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + [PIPE_D] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + [PIPE_D] = BIT(DBUF_S2), + }, + }, + {} +}; + +static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = { + { + .active_pipes = BIT(PIPE_A), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_B), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_C), + .dbuf_mask = { + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S2), + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_D), + .dbuf_mask = { + [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_B) | 
BIT(PIPE_D), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S2), + [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_C] = BIT(DBUF_S3), + [PIPE_D] = BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_C] = BIT(DBUF_S3), + [PIPE_D] = BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_C] = BIT(DBUF_S3), + [PIPE_D] = BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S2), + [PIPE_C] = BIT(DBUF_S3), + [PIPE_D] = BIT(DBUF_S4), + }, + }, + {} +}; + +static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = { + /* + * Keep the join_mbus cases first so check_mbus_joined() + * will prefer them over the !join_mbus cases. + */ + { + .active_pipes = BIT(PIPE_A), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4), + }, + .join_mbus = true, + }, + { + .active_pipes = BIT(PIPE_B), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4), + }, + .join_mbus = true, + }, + { + .active_pipes = BIT(PIPE_A), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + .join_mbus = false, + }, + { + .active_pipes = BIT(PIPE_B), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + .join_mbus = false, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_C), + .dbuf_mask = { + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_D), + .dbuf_mask = { + [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_B) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), + [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), + [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + 
[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + {} + +}; + +static bool check_mbus_joined(u8 active_pipes, + const struct dbuf_slice_conf_entry *dbuf_slices) +{ + int i; + + for (i = 0; dbuf_slices[i].active_pipes != 0; i++) { + if (dbuf_slices[i].active_pipes == active_pipes) + return dbuf_slices[i].join_mbus; + } + return false; +} + +static bool adlp_check_mbus_joined(u8 active_pipes) +{ + return check_mbus_joined(active_pipes, adlp_allowed_dbufs); +} + +static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus, + const struct dbuf_slice_conf_entry *dbuf_slices) +{ + int i; + + for (i = 0; dbuf_slices[i].active_pipes != 0; i++) { + if (dbuf_slices[i].active_pipes == active_pipes && + dbuf_slices[i].join_mbus == join_mbus) + return dbuf_slices[i].dbuf_mask[pipe]; + } + return 0; +} + +/* + * This function finds an entry with the same enabled pipe configuration and + * returns the corresponding DBuf slice mask as stated in the BSpec for the + * particular platform. + */ +static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) +{ + /* + * FIXME: For ICL this is still a bit unclear, as a previous BSpec + * revision required calculating a "pipe ratio" in order to determine + * whether one or two slices can be used for single pipe + * configurations, as an additional constraint to the existing table. + * However, based on recent info, it should not be a "pipe ratio" + * but rather a ratio between pixel_rate and cdclk with additional + * constants, so for now we only use the table until this is + * clarified. Once those additional constraints pop up, this function + * will need access to the crtc_state again. + */ + return compute_dbuf_slices(pipe, active_pipes, join_mbus, + icl_allowed_dbufs); +} + +static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) +{ + return compute_dbuf_slices(pipe, active_pipes, join_mbus, + tgl_allowed_dbufs); +} + +static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) +{ + return compute_dbuf_slices(pipe, active_pipes, join_mbus, + adlp_allowed_dbufs); +} + +static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) +{ + return compute_dbuf_slices(pipe, active_pipes, join_mbus, + dg2_allowed_dbufs); +} + +static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + if (IS_DG2(i915)) + return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus); + else if (DISPLAY_VER(i915) >= 13) + return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus); + else if (DISPLAY_VER(i915) == 12) + return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus); + else if (DISPLAY_VER(i915) == 11) + return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus); + /* + * For anything else just return one slice for now. + * Should be extended for other platforms. + */ + return active_pipes & BIT(pipe) ?
BIT(DBUF_S1) : 0; +} + +static bool +use_minimal_wm0_only(const struct intel_crtc_state *crtc_state, + struct intel_plane *plane) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + + return DISPLAY_VER(i915) >= 13 && + crtc_state->uapi.async_flip && + plane->async_flip; +} + +static u64 +skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + enum plane_id plane_id; + u64 data_rate = 0; + + for_each_plane_id_on_crtc(crtc, plane_id) { + if (plane_id == PLANE_CURSOR) + continue; + + data_rate += crtc_state->rel_data_rate[plane_id]; + + if (DISPLAY_VER(i915) < 11) + data_rate += crtc_state->rel_data_rate_y[plane_id]; + } + + return data_rate; +} + +static const struct skl_wm_level * +skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm, + enum plane_id plane_id, + int level) +{ + const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; + + if (level == 0 && pipe_wm->use_sagv_wm) + return &wm->sagv.wm0; + + return &wm->wm[level]; +} + +static const struct skl_wm_level * +skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm, + enum plane_id plane_id) +{ + const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; + + if (pipe_wm->use_sagv_wm) + return &wm->sagv.trans_wm; + + return &wm->trans_wm; +} + +/* + * We only disable the watermarks for each plane if + * they exceed the ddb allocation of said plane. This + * is done so that we don't end up touching cursor + * watermarks needlessly when some other plane reduces + * our max possible watermark level. + * + * Bspec has this to say about the PLANE_WM enable bit: + * "All the watermarks at this level for all enabled + * planes must be enabled before the level will be used." + * So this is actually safe to do. + */ +static void +skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb) +{ + if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) + memset(wm, 0, sizeof(*wm)); +} + +static void +skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm, + const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb) +{ + if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) || + uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) { + memset(wm, 0, sizeof(*wm)); + memset(uv_wm, 0, sizeof(*uv_wm)); + } +} + +static bool icl_need_wm1_wa(struct drm_i915_private *i915, + enum plane_id plane_id) +{ + /* + * Wa_1408961008:icl, ehl + * Wa_14012656716:tgl, adl + * Underruns with WM1+ disabled + */ + return DISPLAY_VER(i915) == 11 || + (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR); +} + +struct skl_plane_ddb_iter { + u64 data_rate; + u16 start, size; +}; + +static void +skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter, + struct skl_ddb_entry *ddb, + const struct skl_wm_level *wm, + u64 data_rate) +{ + u16 size, extra = 0; + + if (data_rate) { + extra = min_t(u16, iter->size, + DIV64_U64_ROUND_UP(iter->size * data_rate, + iter->data_rate)); + iter->size -= extra; + iter->data_rate -= data_rate; + } + + /* + * Keep ddb entry of all disabled planes explicitly zeroed + * to avoid skl_ddb_add_affected_planes() adding them to + * the state when other planes change their allocations. 
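+ * + * Worked example (illustrative numbers only): with 100 spare blocks + * left in iter->size against a remaining iter->data_rate of 4000, a + * plane contributing data_rate == 1000 gets extra == + * min(100, DIV64_U64_ROUND_UP(100 * 1000, 4000)) == 25 blocks on top + * of its min_ddb_alloc.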
+ */ + size = wm->min_ddb_alloc + extra; + if (size) + iter->start = skl_ddb_entry_init(ddb, iter->start, + iter->start + size); +} + +static int +skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_dbuf_state *dbuf_state = + intel_atomic_get_new_dbuf_state(state); + const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe]; + int num_active = hweight8(dbuf_state->active_pipes); + struct skl_plane_ddb_iter iter; + enum plane_id plane_id; + u16 cursor_size; + u32 blocks; + int level; + + /* Clear the partitioning for disabled planes. */ + memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb)); + memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y)); + + if (!crtc_state->hw.active) + return 0; + + iter.start = alloc->start; + iter.size = skl_ddb_entry_size(alloc); + if (iter.size == 0) + return 0; + + /* Allocate fixed number of blocks for cursor. */ + cursor_size = skl_cursor_allocation(crtc_state, num_active); + iter.size -= cursor_size; + skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR], + alloc->end - cursor_size, alloc->end); + + iter.data_rate = skl_total_relative_data_rate(crtc_state); + + /* + * Find the highest watermark level for which we can satisfy the block + * requirement of active planes. + */ + for (level = ilk_wm_max_level(i915); level >= 0; level--) { + blocks = 0; + for_each_plane_id_on_crtc(crtc, plane_id) { + const struct skl_plane_wm *wm = + &crtc_state->wm.skl.optimal.planes[plane_id]; + + if (plane_id == PLANE_CURSOR) { + const struct skl_ddb_entry *ddb = + &crtc_state->wm.skl.plane_ddb[plane_id]; + + if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) { + drm_WARN_ON(&i915->drm, + wm->wm[level].min_ddb_alloc != U16_MAX); + blocks = U32_MAX; + break; + } + continue; + } + + blocks += wm->wm[level].min_ddb_alloc; + blocks += wm->uv_wm[level].min_ddb_alloc; + } + + if (blocks <= iter.size) { + iter.size -= blocks; + break; + } + } + + if (level < 0) { + drm_dbg_kms(&i915->drm, + "Requested display configuration exceeds system DDB limitations"); + drm_dbg_kms(&i915->drm, "minimum required %d/%d\n", + blocks, iter.size); + return -EINVAL; + } + + /* avoid the WARN later when we don't allocate any extra DDB */ + if (iter.data_rate == 0) + iter.size = 0; + + /* + * Grant each plane the blocks it requires at the highest achievable + * watermark level, plus an extra share of the leftover blocks + * proportional to its relative data rate. 
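+ * At this point "level" is the highest watermark level whose + * min_ddb_alloc requirements fit into iter.size, so the loop below + * hands each plane wm->wm[level].min_ddb_alloc blocks and lets + * skl_allocate_plane_ddb() split the leftovers by rel_data_rate.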
+ */ + for_each_plane_id_on_crtc(crtc, plane_id) { + struct skl_ddb_entry *ddb = + &crtc_state->wm.skl.plane_ddb[plane_id]; + struct skl_ddb_entry *ddb_y = + &crtc_state->wm.skl.plane_ddb_y[plane_id]; + const struct skl_plane_wm *wm = + &crtc_state->wm.skl.optimal.planes[plane_id]; + + if (plane_id == PLANE_CURSOR) + continue; + + if (DISPLAY_VER(i915) < 11 && + crtc_state->nv12_planes & BIT(plane_id)) { + skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level], + crtc_state->rel_data_rate_y[plane_id]); + skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level], + crtc_state->rel_data_rate[plane_id]); + } else { + skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level], + crtc_state->rel_data_rate[plane_id]); + } + } + drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0); + + /* + * When we calculated watermark values we didn't know how high + * of a level we'd actually be able to hit, so we just marked + * all levels as "enabled." Go back now and disable the ones + * that aren't actually possible. + */ + for (level++; level <= ilk_wm_max_level(i915); level++) { + for_each_plane_id_on_crtc(crtc, plane_id) { + const struct skl_ddb_entry *ddb = + &crtc_state->wm.skl.plane_ddb[plane_id]; + const struct skl_ddb_entry *ddb_y = + &crtc_state->wm.skl.plane_ddb_y[plane_id]; + struct skl_plane_wm *wm = + &crtc_state->wm.skl.optimal.planes[plane_id]; + + if (DISPLAY_VER(i915) < 11 && + crtc_state->nv12_planes & BIT(plane_id)) + skl_check_nv12_wm_level(&wm->wm[level], + &wm->uv_wm[level], + ddb_y, ddb); + else + skl_check_wm_level(&wm->wm[level], ddb); + + if (icl_need_wm1_wa(i915, plane_id) && + level == 1 && wm->wm[0].enable) { + wm->wm[level].blocks = wm->wm[0].blocks; + wm->wm[level].lines = wm->wm[0].lines; + wm->wm[level].ignore_lines = wm->wm[0].ignore_lines; + } + } + } + + /* + * Go back and disable the transition and SAGV watermarks + * if it turns out we don't have enough DDB blocks for them. + */ + for_each_plane_id_on_crtc(crtc, plane_id) { + const struct skl_ddb_entry *ddb = + &crtc_state->wm.skl.plane_ddb[plane_id]; + const struct skl_ddb_entry *ddb_y = + &crtc_state->wm.skl.plane_ddb_y[plane_id]; + struct skl_plane_wm *wm = + &crtc_state->wm.skl.optimal.planes[plane_id]; + + if (DISPLAY_VER(i915) < 11 && + crtc_state->nv12_planes & BIT(plane_id)) { + skl_check_wm_level(&wm->trans_wm, ddb_y); + } else { + WARN_ON(skl_ddb_entry_size(ddb_y)); + + skl_check_wm_level(&wm->trans_wm, ddb); + } + + skl_check_wm_level(&wm->sagv.wm0, ddb); + skl_check_wm_level(&wm->sagv.trans_wm, ddb); + } + + return 0; +} + +/* + * The max latency should be 257 (max the punit can code is 255 and we add 2us + * for the read latency) and cpp should always be <= 8, so that + * should allow pixel_rate up to ~2 GHz which seems sufficient since max + * 2xcdclk is 1350 MHz and the pixel rate should never exceed that. 
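+ * Back-of-the-envelope check (pixel_rate being in kHz here, as + * elsewhere in i915): 257 * 2,000,000 * 8 is roughly 4.1e9, which + * still fits in the u32 wm_intermediate_val below (max ~4.29e9).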
+ */ +static uint_fixed_16_16_t +skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate, + u8 cpp, u32 latency, u32 dbuf_block_size) +{ + u32 wm_intermediate_val; + uint_fixed_16_16_t ret; + + if (latency == 0) + return FP_16_16_MAX; + + wm_intermediate_val = latency * pixel_rate * cpp; + ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size); + + if (DISPLAY_VER(i915) >= 10) + ret = add_fixed16_u32(ret, 1); + + return ret; +} + +static uint_fixed_16_16_t +skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency, + uint_fixed_16_16_t plane_blocks_per_line) +{ + u32 wm_intermediate_val; + uint_fixed_16_16_t ret; + + if (latency == 0) + return FP_16_16_MAX; + + wm_intermediate_val = latency * pixel_rate; + wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val, + pipe_htotal * 1000); + ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line); + return ret; +} + +static uint_fixed_16_16_t +intel_get_linetime_us(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + u32 pixel_rate; + u32 crtc_htotal; + uint_fixed_16_16_t linetime_us; + + if (!crtc_state->hw.active) + return u32_to_fixed16(0); + + pixel_rate = crtc_state->pixel_rate; + + if (drm_WARN_ON(&i915->drm, pixel_rate == 0)) + return u32_to_fixed16(0); + + crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal; + linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate); + + return linetime_us; +} + +static int +skl_compute_wm_params(const struct intel_crtc_state *crtc_state, + int width, const struct drm_format_info *format, + u64 modifier, unsigned int rotation, + u32 plane_pixel_rate, struct skl_wm_params *wp, + int color_plane) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + u32 interm_pbpl; + + /* only planar formats have two planes */ + if (color_plane == 1 && + !intel_format_info_is_yuv_semiplanar(format, modifier)) { + drm_dbg_kms(&i915->drm, + "Non-planar formats have a single plane\n"); + return -EINVAL; + } + + wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED || + modifier == I915_FORMAT_MOD_4_TILED || + modifier == I915_FORMAT_MOD_Yf_TILED || + modifier == I915_FORMAT_MOD_Y_TILED_CCS || + modifier == I915_FORMAT_MOD_Yf_TILED_CCS; + wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED; + wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS || + modifier == I915_FORMAT_MOD_Yf_TILED_CCS; + wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier); + + wp->width = width; + if (color_plane == 1 && wp->is_planar) + wp->width /= 2; + + wp->cpp = format->cpp[color_plane]; + wp->plane_pixel_rate = plane_pixel_rate; + + if (DISPLAY_VER(i915) >= 11 && + modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1) + wp->dbuf_block_size = 256; + else + wp->dbuf_block_size = 512; + + if (drm_rotation_90_or_270(rotation)) { + switch (wp->cpp) { + case 1: + wp->y_min_scanlines = 16; + break; + case 2: + wp->y_min_scanlines = 8; + break; + case 4: + wp->y_min_scanlines = 4; + break; + default: + MISSING_CASE(wp->cpp); + return -EINVAL; + } + } else { + wp->y_min_scanlines = 4; + } + + if (skl_needs_memory_bw_wa(i915)) + wp->y_min_scanlines *= 2; + + wp->plane_bytes_per_line = wp->width * wp->cpp; + if (wp->y_tiled) { + interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line * + wp->y_min_scanlines, + wp->dbuf_block_size); + + if (DISPLAY_VER(i915) >= 10) + interm_pbpl++; + + wp->plane_blocks_per_line = div_fixed16(interm_pbpl, + wp->y_min_scanlines); + } else { + 
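/* + * Not Y-tiled: linear surfaces always round up one extra block per + * line, while X-tiled only does so on display version 10 and later. + */ +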
interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, + wp->dbuf_block_size); + + if (!wp->x_tiled || DISPLAY_VER(i915) >= 10) + interm_pbpl++; + + wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl); + } + + wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines, + wp->plane_blocks_per_line); + + wp->linetime_us = fixed16_to_u32_round_up(intel_get_linetime_us(crtc_state)); + + return 0; +} + +static int +skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + struct skl_wm_params *wp, int color_plane) +{ + const struct drm_framebuffer *fb = plane_state->hw.fb; + int width; + + /* + * Src coordinates are already rotated by 270 degrees for + * the 90/270 degree plane rotation cases (to match the + * GTT mapping), hence no need to account for rotation here. + */ + width = drm_rect_width(&plane_state->uapi.src) >> 16; + + return skl_compute_wm_params(crtc_state, width, + fb->format, fb->modifier, + plane_state->hw.rotation, + intel_plane_pixel_rate(crtc_state, plane_state), + wp, color_plane); +} + +static bool skl_wm_has_lines(struct drm_i915_private *i915, int level) +{ + if (DISPLAY_VER(i915) >= 10) + return true; + + /* The number of lines are ignored for the level 0 watermark. */ + return level > 0; +} + +static int skl_wm_max_lines(struct drm_i915_private *i915) +{ + if (DISPLAY_VER(i915) >= 13) + return 255; + else + return 31; +} + +static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, + struct intel_plane *plane, + int level, + unsigned int latency, + const struct skl_wm_params *wp, + const struct skl_wm_level *result_prev, + struct skl_wm_level *result /* out */) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + uint_fixed_16_16_t method1, method2; + uint_fixed_16_16_t selected_result; + u32 blocks, lines, min_ddb_alloc = 0; + + if (latency == 0 || + (use_minimal_wm0_only(crtc_state, plane) && level > 0)) { + /* reject it */ + result->min_ddb_alloc = U16_MAX; + return; + } + + /* + * WaIncreaseLatencyIPCEnabled: kbl,cfl + * Display WA #1141: kbl,cfl + */ + if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) && + skl_watermark_ipc_enabled(i915)) + latency += 4; + + if (skl_needs_memory_bw_wa(i915) && wp->x_tiled) + latency += 15; + + method1 = skl_wm_method1(i915, wp->plane_pixel_rate, + wp->cpp, latency, wp->dbuf_block_size); + method2 = skl_wm_method2(wp->plane_pixel_rate, + crtc_state->hw.pipe_mode.crtc_htotal, + latency, + wp->plane_blocks_per_line); + + if (wp->y_tiled) { + selected_result = max_fixed16(method2, wp->y_tile_minimum); + } else { + if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal / + wp->dbuf_block_size < 1) && + (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) { + selected_result = method2; + } else if (latency >= wp->linetime_us) { + if (DISPLAY_VER(i915) == 9) + selected_result = min_fixed16(method1, method2); + else + selected_result = method2; + } else { + selected_result = method1; + } + } + + blocks = fixed16_to_u32_round_up(selected_result) + 1; + /* + * Lets have blocks at minimum equivalent to plane_blocks_per_line + * as there will be at minimum one line for lines configuration. This + * is a work around for FIFO underruns observed with resolutions like + * 4k 60 Hz in single channel DRAM configurations. + * + * As per the Bspec 49325, if the ddb allocation can hold at least + * one plane_blocks_per_line, we should have selected method2 in + * the above logic. 
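(Method2 is lines * plane_blocks_per_line, so whenever it wins it already yields at least one line's worth of blocks.)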
Assuming that modern versions have enough dbuf + * and method2 guarantees blocks equivalent to at least 1 line, + * select the blocks as plane_blocks_per_line. + * + * TODO: Revisit the logic when we have better understanding on DRAM + * channels' impact on the level 0 memory latency and the relevant + * wm calculations. + */ + if (skl_wm_has_lines(i915, level)) + blocks = max(blocks, + fixed16_to_u32_round_up(wp->plane_blocks_per_line)); + lines = div_round_up_fixed16(selected_result, + wp->plane_blocks_per_line); + + if (DISPLAY_VER(i915) == 9) { + /* Display WA #1125: skl,bxt,kbl */ + if (level == 0 && wp->rc_surface) + blocks += fixed16_to_u32_round_up(wp->y_tile_minimum); + + /* Display WA #1126: skl,bxt,kbl */ + if (level >= 1 && level <= 7) { + if (wp->y_tiled) { + blocks += fixed16_to_u32_round_up(wp->y_tile_minimum); + lines += wp->y_min_scanlines; + } else { + blocks++; + } + + /* + * Make sure result blocks for higher latency levels are + * at least as high as level below the current level. + * Assumption in DDB algorithm optimization for special + * cases. Also covers Display WA #1125 for RC. + */ + if (result_prev->blocks > blocks) + blocks = result_prev->blocks; + } + } + + if (DISPLAY_VER(i915) >= 11) { + if (wp->y_tiled) { + int extra_lines; + + if (lines % wp->y_min_scanlines == 0) + extra_lines = wp->y_min_scanlines; + else + extra_lines = wp->y_min_scanlines * 2 - + lines % wp->y_min_scanlines; + + min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines, + wp->plane_blocks_per_line); + } else { + min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10); + } + } + + if (!skl_wm_has_lines(i915, level)) + lines = 0; + + if (lines > skl_wm_max_lines(i915)) { + /* reject it */ + result->min_ddb_alloc = U16_MAX; + return; + } + + /* + * If lines is valid, assume we can use this watermark level + * for now. We'll come back and disable it after we calculate the + * DDB allocation if it turns out we don't actually have enough + * blocks to satisfy it. 
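+ * (That later rejection happens in skl_check_wm_level(), which zeroes + * the level once its min_ddb_alloc exceeds the final + * skl_ddb_entry_size().)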
+ */ + result->blocks = blocks; + result->lines = lines; + /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */ + result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1; + result->enable = true; + + if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us) + result->can_sagv = latency >= i915->display.sagv.block_time_us; +} + +static void +skl_compute_wm_levels(const struct intel_crtc_state *crtc_state, + struct intel_plane *plane, + const struct skl_wm_params *wm_params, + struct skl_wm_level *levels) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + int level, max_level = ilk_wm_max_level(i915); + struct skl_wm_level *result_prev = &levels[0]; + + for (level = 0; level <= max_level; level++) { + struct skl_wm_level *result = &levels[level]; + unsigned int latency = i915->display.wm.skl_latency[level]; + + skl_compute_plane_wm(crtc_state, plane, level, latency, + wm_params, result_prev, result); + + result_prev = result; + } +} + +static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state, + struct intel_plane *plane, + const struct skl_wm_params *wm_params, + struct skl_plane_wm *plane_wm) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0; + struct skl_wm_level *levels = plane_wm->wm; + unsigned int latency = 0; + + if (i915->display.sagv.block_time_us) + latency = i915->display.sagv.block_time_us + i915->display.wm.skl_latency[0]; + + skl_compute_plane_wm(crtc_state, plane, 0, latency, + wm_params, &levels[0], + sagv_wm); +} + +static void skl_compute_transition_wm(struct drm_i915_private *i915, + struct skl_wm_level *trans_wm, + const struct skl_wm_level *wm0, + const struct skl_wm_params *wp) +{ + u16 trans_min, trans_amount, trans_y_tile_min; + u16 wm0_blocks, trans_offset, blocks; + + /* Transition WMs don't make any sense if IPC is disabled */ + if (!skl_watermark_ipc_enabled(i915)) + return; + + /* + * WaDisableTWM:skl,kbl,cfl,bxt + * Transition WMs are not recommended by the HW team for GEN9 + */ + if (DISPLAY_VER(i915) == 9) + return; + + if (DISPLAY_VER(i915) >= 11) + trans_min = 4; + else + trans_min = 14; + + /* Display WA #1140: glk,cnl */ + if (DISPLAY_VER(i915) == 10) + trans_amount = 0; + else + trans_amount = 10; /* This is a configurable amount */ + + trans_offset = trans_min + trans_amount; + + /* + * The spec asks for Selected Result Blocks for wm0 (the real value), + * not Result Blocks (the integer value). Pay attention to the capital + * letters. The value wm_l0->blocks is actually Result Blocks, but + * since Result Blocks is the ceiling of Selected Result Blocks plus 1, + * and since we later will have to get the ceiling of the sum in the + * transition watermarks calculation, we can just pretend Selected + * Result Blocks is Result Blocks minus 1 and it should work for the + * current platforms. + */ + wm0_blocks = wm0->blocks - 1; + + if (wp->y_tiled) { + trans_y_tile_min = + (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum); + blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset; + } else { + blocks = wm0_blocks + trans_offset; + } + blocks++; + + /* + * Just assume we can enable the transition watermark. After + * computing the DDB we'll come back and disable it if that + * assumption turns out to be false. 
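+ * + * Worked example (illustrative numbers only): on display version 11+ + * with IPC enabled, trans_offset == 4 + 10 == 14, so a non Y-tiled + * plane with wm0->blocks == 31 ends up with blocks == (31 - 1) + 14 + + * 1 == 45.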
+ */ + trans_wm->blocks = blocks; + trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1); + trans_wm->enable = true; +} + +static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + struct intel_plane *plane, int color_plane) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id]; + struct skl_wm_params wm_params; + int ret; + + ret = skl_compute_plane_wm_params(crtc_state, plane_state, + &wm_params, color_plane); + if (ret) + return ret; + + skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm); + + skl_compute_transition_wm(i915, &wm->trans_wm, + &wm->wm[0], &wm_params); + + if (DISPLAY_VER(i915) >= 12) { + tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm); + + skl_compute_transition_wm(i915, &wm->sagv.trans_wm, + &wm->sagv.wm0, &wm_params); + } + + return 0; +} + +static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + struct intel_plane *plane) +{ + struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id]; + struct skl_wm_params wm_params; + int ret; + + wm->is_planar = true; + + /* uv plane watermarks must also be validated for NV12/Planar */ + ret = skl_compute_plane_wm_params(crtc_state, plane_state, + &wm_params, 1); + if (ret) + return ret; + + skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm); + + return 0; +} + +static int skl_build_plane_wm(struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + enum plane_id plane_id = plane->id; + struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; + const struct drm_framebuffer *fb = plane_state->hw.fb; + int ret; + + memset(wm, 0, sizeof(*wm)); + + if (!intel_wm_plane_visible(crtc_state, plane_state)) + return 0; + + ret = skl_build_plane_wm_single(crtc_state, plane_state, + plane, 0); + if (ret) + return ret; + + if (fb->format->is_yuv && fb->format->num_planes > 1) { + ret = skl_build_plane_wm_uv(crtc_state, plane_state, + plane); + if (ret) + return ret; + } + + return 0; +} + +static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *i915 = to_i915(plane->base.dev); + enum plane_id plane_id = plane->id; + struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; + int ret; + + /* Watermarks calculated in master */ + if (plane_state->planar_slave) + return 0; + + memset(wm, 0, sizeof(*wm)); + + if (plane_state->planar_linked_plane) { + const struct drm_framebuffer *fb = plane_state->hw.fb; + + drm_WARN_ON(&i915->drm, + !intel_wm_plane_visible(crtc_state, plane_state)); + drm_WARN_ON(&i915->drm, !fb->format->is_yuv || + fb->format->num_planes == 1); + + ret = skl_build_plane_wm_single(crtc_state, plane_state, + plane_state->planar_linked_plane, 0); + if (ret) + return ret; + + ret = skl_build_plane_wm_single(crtc_state, plane_state, + plane, 1); + if (ret) + return ret; + } else if (intel_wm_plane_visible(crtc_state, plane_state)) { + ret = skl_build_plane_wm_single(crtc_state, plane_state, + plane, 0); + if (ret) + return ret; + } + + return 0; +} + +static int skl_build_pipe_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) 
+{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *plane_state; + struct intel_plane *plane; + int ret, i; + + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + /* + * FIXME should perhaps check {old,new}_plane_crtc->hw.crtc + * instead but we don't populate that correctly for NV12 Y + * planes so for now hack this. + */ + if (plane->pipe != crtc->pipe) + continue; + + if (DISPLAY_VER(i915) >= 11) + ret = icl_build_plane_wm(crtc_state, plane_state); + else + ret = skl_build_plane_wm(crtc_state, plane_state); + if (ret) + return ret; + } + + crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw; + + return 0; +} + +static void skl_ddb_entry_write(struct drm_i915_private *i915, + i915_reg_t reg, + const struct skl_ddb_entry *entry) +{ + if (entry->end) + intel_de_write_fw(i915, reg, + PLANE_BUF_END(entry->end - 1) | + PLANE_BUF_START(entry->start)); + else + intel_de_write_fw(i915, reg, 0); +} + +static void skl_write_wm_level(struct drm_i915_private *i915, + i915_reg_t reg, + const struct skl_wm_level *level) +{ + u32 val = 0; + + if (level->enable) + val |= PLANE_WM_EN; + if (level->ignore_lines) + val |= PLANE_WM_IGNORE_LINES; + val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks); + val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines); + + intel_de_write_fw(i915, reg, val); +} + +void skl_write_plane_wm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + int level, max_level = ilk_wm_max_level(i915); + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; + const struct skl_ddb_entry *ddb = + &crtc_state->wm.skl.plane_ddb[plane_id]; + const struct skl_ddb_entry *ddb_y = + &crtc_state->wm.skl.plane_ddb_y[plane_id]; + + for (level = 0; level <= max_level; level++) + skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level), + skl_plane_wm_level(pipe_wm, plane_id, level)); + + skl_write_wm_level(i915, PLANE_WM_TRANS(pipe, plane_id), + skl_plane_trans_wm(pipe_wm, plane_id)); + + if (HAS_HW_SAGV_WM(i915)) { + const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; + + skl_write_wm_level(i915, PLANE_WM_SAGV(pipe, plane_id), + &wm->sagv.wm0); + skl_write_wm_level(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id), + &wm->sagv.trans_wm); + } + + skl_ddb_entry_write(i915, + PLANE_BUF_CFG(pipe, plane_id), ddb); + + if (DISPLAY_VER(i915) < 11) + skl_ddb_entry_write(i915, + PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_y); +} + +void skl_write_cursor_wm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + int level, max_level = ilk_wm_max_level(i915); + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; + const struct skl_ddb_entry *ddb = + &crtc_state->wm.skl.plane_ddb[plane_id]; + + for (level = 0; level <= max_level; level++) + skl_write_wm_level(i915, CUR_WM(pipe, level), + skl_plane_wm_level(pipe_wm, plane_id, level)); + + skl_write_wm_level(i915, CUR_WM_TRANS(pipe), + skl_plane_trans_wm(pipe_wm, plane_id)); + + if (HAS_HW_SAGV_WM(i915)) { + const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; + + skl_write_wm_level(i915, CUR_WM_SAGV(pipe), + &wm->sagv.wm0); + skl_write_wm_level(i915, CUR_WM_SAGV_TRANS(pipe), + 
&wm->sagv.trans_wm); + } + + skl_ddb_entry_write(i915, CUR_BUF_CFG(pipe), ddb); +} + +static bool skl_wm_level_equals(const struct skl_wm_level *l1, + const struct skl_wm_level *l2) +{ + return l1->enable == l2->enable && + l1->ignore_lines == l2->ignore_lines && + l1->lines == l2->lines && + l1->blocks == l2->blocks; +} + +static bool skl_plane_wm_equals(struct drm_i915_private *i915, + const struct skl_plane_wm *wm1, + const struct skl_plane_wm *wm2) +{ + int level, max_level = ilk_wm_max_level(i915); + + for (level = 0; level <= max_level; level++) { + /* + * We don't check uv_wm as the hardware doesn't actually + * use it. It only gets used for calculating the required + * ddb allocation. + */ + if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level])) + return false; + } + + return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) && + skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) && + skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm); +} + +static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a, + const struct skl_ddb_entry *b) +{ + return a->start < b->end && b->start < a->end; +} + +static void skl_ddb_entry_union(struct skl_ddb_entry *a, + const struct skl_ddb_entry *b) +{ + if (a->end && b->end) { + a->start = min(a->start, b->start); + a->end = max(a->end, b->end); + } else if (b->end) { + a->start = b->start; + a->end = b->end; + } +} + +bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, + const struct skl_ddb_entry *entries, + int num_entries, int ignore_idx) +{ + int i; + + for (i = 0; i < num_entries; i++) { + if (i != ignore_idx && + skl_ddb_entries_overlap(ddb, &entries[i])) + return true; + } + + return false; +} + +static int +skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *new_crtc_state) +{ + struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state); + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_plane *plane; + + for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { + struct intel_plane_state *plane_state; + enum plane_id plane_id = plane->id; + + if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id], + &new_crtc_state->wm.skl.plane_ddb[plane_id]) && + skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id], + &new_crtc_state->wm.skl.plane_ddb_y[plane_id])) + continue; + + plane_state = intel_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) + return PTR_ERR(plane_state); + + new_crtc_state->update_planes |= BIT(plane_id); + } + + return 0; +} + +static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state) +{ + struct drm_i915_private *i915 = to_i915(dbuf_state->base.state->base.dev); + u8 enabled_slices; + enum pipe pipe; + + /* + * FIXME: For now we always enable slice S1 as per + * the Bspec display initialization sequence. 
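+ * As a consequence the returned mask always contains BIT(DBUF_S1), + * even if no pipe in the loop below claims that slice.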
+ */ + enabled_slices = BIT(DBUF_S1); + + for_each_pipe(i915, pipe) + enabled_slices |= dbuf_state->slices[pipe]; + + return enabled_slices; +} + +static int +skl_compute_ddb(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_dbuf_state *old_dbuf_state; + struct intel_dbuf_state *new_dbuf_state = NULL; + const struct intel_crtc_state *old_crtc_state; + struct intel_crtc_state *new_crtc_state; + struct intel_crtc *crtc; + int ret, i; + + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + new_dbuf_state = intel_atomic_get_dbuf_state(state); + if (IS_ERR(new_dbuf_state)) + return PTR_ERR(new_dbuf_state); + + old_dbuf_state = intel_atomic_get_old_dbuf_state(state); + break; + } + + if (!new_dbuf_state) + return 0; + + new_dbuf_state->active_pipes = + intel_calc_active_pipes(state, old_dbuf_state->active_pipes); + + if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) { + ret = intel_atomic_lock_global_state(&new_dbuf_state->base); + if (ret) + return ret; + } + + if (HAS_MBUS_JOINING(i915)) + new_dbuf_state->joined_mbus = + adlp_check_mbus_joined(new_dbuf_state->active_pipes); + + for_each_intel_crtc(&i915->drm, crtc) { + enum pipe pipe = crtc->pipe; + + new_dbuf_state->slices[pipe] = + skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes, + new_dbuf_state->joined_mbus); + + if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe]) + continue; + + ret = intel_atomic_lock_global_state(&new_dbuf_state->base); + if (ret) + return ret; + } + + new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state); + + if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices || + old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) { + ret = intel_atomic_serialize_global_state(&new_dbuf_state->base); + if (ret) + return ret; + + if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) { + /* TODO: Implement vblank synchronized MBUS joining changes */ + ret = intel_modeset_all_pipes(state); + if (ret) + return ret; + } + + drm_dbg_kms(&i915->drm, + "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n", + old_dbuf_state->enabled_slices, + new_dbuf_state->enabled_slices, + INTEL_INFO(i915)->display.dbuf.slice_mask, + str_yes_no(old_dbuf_state->joined_mbus), + str_yes_no(new_dbuf_state->joined_mbus)); + } + + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + enum pipe pipe = crtc->pipe; + + new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state); + + if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe]) + continue; + + ret = intel_atomic_lock_global_state(&new_dbuf_state->base); + if (ret) + return ret; + } + + for_each_intel_crtc(&i915->drm, crtc) { + ret = skl_crtc_allocate_ddb(state, crtc); + if (ret) + return ret; + } + + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + ret = skl_crtc_allocate_plane_ddb(state, crtc); + if (ret) + return ret; + + ret = skl_ddb_add_affected_planes(old_crtc_state, + new_crtc_state); + if (ret) + return ret; + } + + return 0; +} + +static char enast(bool enable) +{ + return enable ? 
'*' : ' '; +} + +static void +skl_print_wm_changes(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_crtc_state *old_crtc_state; + const struct intel_crtc_state *new_crtc_state; + struct intel_plane *plane; + struct intel_crtc *crtc; + int i; + + if (!drm_debug_enabled(DRM_UT_KMS)) + return; + + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm; + + old_pipe_wm = &old_crtc_state->wm.skl.optimal; + new_pipe_wm = &new_crtc_state->wm.skl.optimal; + + for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { + enum plane_id plane_id = plane->id; + const struct skl_ddb_entry *old, *new; + + old = &old_crtc_state->wm.skl.plane_ddb[plane_id]; + new = &new_crtc_state->wm.skl.plane_ddb[plane_id]; + + if (skl_ddb_entry_equal(old, new)) + continue; + + drm_dbg_kms(&i915->drm, + "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n", + plane->base.base.id, plane->base.name, + old->start, old->end, new->start, new->end, + skl_ddb_entry_size(old), skl_ddb_entry_size(new)); + } + + for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { + enum plane_id plane_id = plane->id; + const struct skl_plane_wm *old_wm, *new_wm; + + old_wm = &old_pipe_wm->planes[plane_id]; + new_wm = &new_pipe_wm->planes[plane_id]; + + if (skl_plane_wm_equals(i915, old_wm, new_wm)) + continue; + + drm_dbg_kms(&i915->drm, + "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" + " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n", + plane->base.base.id, plane->base.name, + enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable), + enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable), + enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable), + enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable), + enast(old_wm->trans_wm.enable), + enast(old_wm->sagv.wm0.enable), + enast(old_wm->sagv.trans_wm.enable), + enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable), + enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable), + enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable), + enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable), + enast(new_wm->trans_wm.enable), + enast(new_wm->sagv.wm0.enable), + enast(new_wm->sagv.trans_wm.enable)); + + drm_dbg_kms(&i915->drm, + "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" + " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n", + plane->base.base.id, plane->base.name, + enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines, + enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines, + enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines, + enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines, + enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines, + enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines, + enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines, + enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines, + enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines, + enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, + enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines, + enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines, + enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines, + enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines, + enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines, + enast(new_wm->wm[4].ignore_lines), 
new_wm->wm[4].lines, + enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines, + enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines, + enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines, + enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines, + enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines, + enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines); + + drm_dbg_kms(&i915->drm, + "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" + " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", + plane->base.base.id, plane->base.name, + old_wm->wm[0].blocks, old_wm->wm[1].blocks, + old_wm->wm[2].blocks, old_wm->wm[3].blocks, + old_wm->wm[4].blocks, old_wm->wm[5].blocks, + old_wm->wm[6].blocks, old_wm->wm[7].blocks, + old_wm->trans_wm.blocks, + old_wm->sagv.wm0.blocks, + old_wm->sagv.trans_wm.blocks, + new_wm->wm[0].blocks, new_wm->wm[1].blocks, + new_wm->wm[2].blocks, new_wm->wm[3].blocks, + new_wm->wm[4].blocks, new_wm->wm[5].blocks, + new_wm->wm[6].blocks, new_wm->wm[7].blocks, + new_wm->trans_wm.blocks, + new_wm->sagv.wm0.blocks, + new_wm->sagv.trans_wm.blocks); + + drm_dbg_kms(&i915->drm, + "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" + " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", + plane->base.base.id, plane->base.name, + old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc, + old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc, + old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc, + old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc, + old_wm->trans_wm.min_ddb_alloc, + old_wm->sagv.wm0.min_ddb_alloc, + old_wm->sagv.trans_wm.min_ddb_alloc, + new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc, + new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc, + new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc, + new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc, + new_wm->trans_wm.min_ddb_alloc, + new_wm->sagv.wm0.min_ddb_alloc, + new_wm->sagv.trans_wm.min_ddb_alloc); + } + } +} + +static bool skl_plane_selected_wm_equals(struct intel_plane *plane, + const struct skl_pipe_wm *old_pipe_wm, + const struct skl_pipe_wm *new_pipe_wm) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + int level, max_level = ilk_wm_max_level(i915); + + for (level = 0; level <= max_level; level++) { + /* + * We don't check uv_wm as the hardware doesn't actually + * use it. It only gets used for calculating the required + * ddb allocation. + */ + if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level), + skl_plane_wm_level(new_pipe_wm, plane->id, level))) + return false; + } + + if (HAS_HW_SAGV_WM(i915)) { + const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id]; + const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id]; + + if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) || + !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm)) + return false; + } + + return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id), + skl_plane_trans_wm(new_pipe_wm, plane->id)); +} + +/* + * To make sure the cursor watermark registers are always consistent + * with our computed state the following scenario needs special + * treatment: + * + * 1. enable cursor + * 2. move cursor entirely offscreen + * 3. disable cursor + * + * Step 2. does call .disable_plane() but does not zero the watermarks + * (since we consider an offscreen cursor still active for the purposes + * of watermarks). Step 3. 
would not normally call .disable_plane() + * because the actual plane visibility isn't changing, and we don't + * deallocate the cursor ddb until the pipe gets disabled. So we must + * force step 3. to call .disable_plane() to update the watermark + * registers properly. + * + * Other planes do not suffer from this issue as their watermarks are + * calculated based on the actual plane visibility. The only time this + * can trigger for the other planes is during the initial readout as the + * default value of the watermark registers is not zero. + */ +static int skl_wm_add_affected_planes(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct intel_plane *plane; + + for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { + struct intel_plane_state *plane_state; + enum plane_id plane_id = plane->id; + + /* + * Force a full wm update for every plane on modeset. + * Required because the reset value of the wm registers + * is non-zero, whereas we want all disabled planes to + * have zero watermarks. So if we turn off the relevant + * power well the hardware state will go out of sync + * with the software state. + */ + if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) && + skl_plane_selected_wm_equals(plane, + &old_crtc_state->wm.skl.optimal, + &new_crtc_state->wm.skl.optimal)) + continue; + + plane_state = intel_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) + return PTR_ERR(plane_state); + + new_crtc_state->update_planes |= BIT(plane_id); + } + + return 0; +} + +static int +skl_compute_wm(struct intel_atomic_state *state) +{ + struct intel_crtc *crtc; + struct intel_crtc_state *new_crtc_state; + int ret, i; + + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + ret = skl_build_pipe_wm(state, crtc); + if (ret) + return ret; + } + + ret = skl_compute_ddb(state); + if (ret) + return ret; + + ret = intel_compute_sagv_mask(state); + if (ret) + return ret; + + /* + * skl_compute_ddb() will have adjusted the final watermarks + * based on how much ddb is available. Now we can actually + * check if the final watermarks changed. 
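+ * The ordering above matters: skl_build_pipe_wm() fills in optimistic + * per-level values, skl_compute_ddb() then splits the available + * blocks, and only after that do the final enable bits settle.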
+ */ + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + ret = skl_wm_add_affected_planes(state, crtc); + if (ret) + return ret; + } + + skl_print_wm_changes(state); + + return 0; +} + +static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level) +{ + level->enable = val & PLANE_WM_EN; + level->ignore_lines = val & PLANE_WM_IGNORE_LINES; + level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val); + level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val); +} + +static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, + struct skl_pipe_wm *out) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + int level, max_level; + enum plane_id plane_id; + u32 val; + + max_level = ilk_wm_max_level(i915); + + for_each_plane_id_on_crtc(crtc, plane_id) { + struct skl_plane_wm *wm = &out->planes[plane_id]; + + for (level = 0; level <= max_level; level++) { + if (plane_id != PLANE_CURSOR) + val = intel_uncore_read(&i915->uncore, PLANE_WM(pipe, plane_id, level)); + else + val = intel_uncore_read(&i915->uncore, CUR_WM(pipe, level)); + + skl_wm_level_from_reg_val(val, &wm->wm[level]); + } + + if (plane_id != PLANE_CURSOR) + val = intel_uncore_read(&i915->uncore, PLANE_WM_TRANS(pipe, plane_id)); + else + val = intel_uncore_read(&i915->uncore, CUR_WM_TRANS(pipe)); + + skl_wm_level_from_reg_val(val, &wm->trans_wm); + + if (HAS_HW_SAGV_WM(i915)) { + if (plane_id != PLANE_CURSOR) + val = intel_uncore_read(&i915->uncore, + PLANE_WM_SAGV(pipe, plane_id)); + else + val = intel_uncore_read(&i915->uncore, + CUR_WM_SAGV(pipe)); + + skl_wm_level_from_reg_val(val, &wm->sagv.wm0); + + if (plane_id != PLANE_CURSOR) + val = intel_uncore_read(&i915->uncore, + PLANE_WM_SAGV_TRANS(pipe, plane_id)); + else + val = intel_uncore_read(&i915->uncore, + CUR_WM_SAGV_TRANS(pipe)); + + skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm); + } else if (DISPLAY_VER(i915) >= 12) { + wm->sagv.wm0 = wm->wm[0]; + wm->sagv.trans_wm = wm->trans_wm; + } + } +} + +void skl_wm_get_hw_state(struct drm_i915_private *i915) +{ + struct intel_dbuf_state *dbuf_state = + to_intel_dbuf_state(i915->display.dbuf.obj.state); + struct intel_crtc *crtc; + + if (HAS_MBUS_JOINING(i915)) + dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN; + + for_each_intel_crtc(&i915->drm, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + enum pipe pipe = crtc->pipe; + unsigned int mbus_offset; + enum plane_id plane_id; + u8 slices; + + memset(&crtc_state->wm.skl.optimal, 0, + sizeof(crtc_state->wm.skl.optimal)); + if (crtc_state->hw.active) + skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal); + crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal; + + memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe])); + + for_each_plane_id_on_crtc(crtc, plane_id) { + struct skl_ddb_entry *ddb = + &crtc_state->wm.skl.plane_ddb[plane_id]; + struct skl_ddb_entry *ddb_y = + &crtc_state->wm.skl.plane_ddb_y[plane_id]; + + if (!crtc_state->hw.active) + continue; + + skl_ddb_get_hw_plane_state(i915, crtc->pipe, + plane_id, ddb, ddb_y); + + skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb); + skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y); + } + + dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state); + + /* + * Used for checking overlaps, so we need absolute + * offsets instead of MBUS relative offsets. 
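+ * (The entries read back from the hardware are presumably relative to + * the pipe's own MBUS slice group, hence the mbus_ddb_offset() rebase + * below.)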
+ */ + slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, + dbuf_state->joined_mbus); + mbus_offset = mbus_ddb_offset(i915, slices); + crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start; + crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end; + + /* The slices actually used by the planes on the pipe */ + dbuf_state->slices[pipe] = + skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb); + + drm_dbg_kms(&i915->drm, + "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n", + crtc->base.base.id, crtc->base.name, + dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start, + dbuf_state->ddb[pipe].end, dbuf_state->active_pipes, + str_yes_no(dbuf_state->joined_mbus)); + } + + dbuf_state->enabled_slices = i915->display.dbuf.enabled_slices; +} + +static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915) +{ + const struct intel_dbuf_state *dbuf_state = + to_intel_dbuf_state(i915->display.dbuf.obj.state); + struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; + struct intel_crtc *crtc; + + for_each_intel_crtc(&i915->drm, crtc) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + entries[crtc->pipe] = crtc_state->wm.skl.ddb; + } + + for_each_intel_crtc(&i915->drm, crtc) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + u8 slices; + + slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, + dbuf_state->joined_mbus); + if (dbuf_state->slices[crtc->pipe] & ~slices) + return true; + + if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries, + I915_MAX_PIPES, crtc->pipe)) + return true; + } + + return false; +} + +void skl_wm_sanitize(struct drm_i915_private *i915) +{ + struct intel_crtc *crtc; + + /* + * On TGL/RKL (at least) the BIOS likes to assign the planes + * to the wrong DBUF slices. This will cause an infinite loop + * in skl_commit_modeset_enables() as it can't find a way to + * transition between the old bogus DBUF layout to the new + * proper DBUF layout without DBUF allocation overlaps between + * the planes (which cannot be allowed or else the hardware + * may hang). If we detect a bogus DBUF layout just turn off + * all the planes so that skl_commit_modeset_enables() can + * simply ignore them. 
+ */ + if (!skl_dbuf_is_misconfigured(i915)) + return; + + drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n"); + + for_each_intel_crtc(&i915->drm, crtc) { + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + if (plane_state->uapi.visible) + intel_plane_disable_noatomic(crtc, plane); + + drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0); + + memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb)); + } +} + +void intel_wm_state_verify(struct intel_crtc *crtc, + struct intel_crtc_state *new_crtc_state) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct skl_hw_state { + struct skl_ddb_entry ddb[I915_MAX_PLANES]; + struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; + struct skl_pipe_wm wm; + } *hw; + const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal; + int level, max_level = ilk_wm_max_level(i915); + struct intel_plane *plane; + u8 hw_enabled_slices; + + if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active) + return; + + hw = kzalloc(sizeof(*hw), GFP_KERNEL); + if (!hw) + return; + + skl_pipe_wm_get_hw_state(crtc, &hw->wm); + + skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y); + + hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915); + + if (DISPLAY_VER(i915) >= 11 && + hw_enabled_slices != i915->display.dbuf.enabled_slices) + drm_err(&i915->drm, + "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n", + i915->display.dbuf.enabled_slices, + hw_enabled_slices); + + for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { + const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; + const struct skl_wm_level *hw_wm_level, *sw_wm_level; + + /* Watermarks */ + for (level = 0; level <= max_level; level++) { + hw_wm_level = &hw->wm.planes[plane->id].wm[level]; + sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level); + + if (skl_wm_level_equals(hw_wm_level, sw_wm_level)) + continue; + + drm_err(&i915->drm, + "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", + plane->base.base.id, plane->base.name, level, + sw_wm_level->enable, + sw_wm_level->blocks, + sw_wm_level->lines, + hw_wm_level->enable, + hw_wm_level->blocks, + hw_wm_level->lines); + } + + hw_wm_level = &hw->wm.planes[plane->id].trans_wm; + sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id); + + if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) { + drm_err(&i915->drm, + "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", + plane->base.base.id, plane->base.name, + sw_wm_level->enable, + sw_wm_level->blocks, + sw_wm_level->lines, + hw_wm_level->enable, + hw_wm_level->blocks, + hw_wm_level->lines); + } + + hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0; + sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0; + + if (HAS_HW_SAGV_WM(i915) && + !skl_wm_level_equals(hw_wm_level, sw_wm_level)) { + drm_err(&i915->drm, + "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", + plane->base.base.id, plane->base.name, + sw_wm_level->enable, + sw_wm_level->blocks, + sw_wm_level->lines, + hw_wm_level->enable, + hw_wm_level->blocks, + hw_wm_level->lines); + } + + hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm; + sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm; + + if (HAS_HW_SAGV_WM(i915) && + !skl_wm_level_equals(hw_wm_level, sw_wm_level)) { + drm_err(&i915->drm, + 
"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", + plane->base.base.id, plane->base.name, + sw_wm_level->enable, + sw_wm_level->blocks, + sw_wm_level->lines, + hw_wm_level->enable, + hw_wm_level->blocks, + hw_wm_level->lines); + } + + /* DDB */ + hw_ddb_entry = &hw->ddb[PLANE_CURSOR]; + sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR]; + + if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { + drm_err(&i915->drm, + "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n", + plane->base.base.id, plane->base.name, + sw_ddb_entry->start, sw_ddb_entry->end, + hw_ddb_entry->start, hw_ddb_entry->end); + } + } + + kfree(hw); +} + +bool skl_watermark_ipc_enabled(struct drm_i915_private *i915) +{ + return i915->display.wm.ipc_enabled; +} + +void skl_watermark_ipc_update(struct drm_i915_private *i915) +{ + if (!HAS_IPC(i915)) + return; + + intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL2, DISP_IPC_ENABLE, + skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0); +} + +static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915) +{ + /* Display WA #0477 WaDisableIPC: skl */ + if (IS_SKYLAKE(i915)) + return false; + + /* Display WA #1141: SKL:all KBL:all CFL */ + if (IS_KABYLAKE(i915) || + IS_COFFEELAKE(i915) || + IS_COMETLAKE(i915)) + return i915->dram_info.symmetric_memory; + + return true; +} + +void skl_watermark_ipc_init(struct drm_i915_private *i915) +{ + if (!HAS_IPC(i915)) + return; + + i915->display.wm.ipc_enabled = skl_watermark_ipc_can_enable(i915); + + skl_watermark_ipc_update(i915); +} + +static void +adjust_wm_latency(struct drm_i915_private *i915, + u16 wm[], int max_level, int read_latency) +{ + bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed; + int i, level; + + /* + * If a level n (n > 1) has a 0us latency, all levels m (m >= n) + * need to be disabled. We make sure to sanitize the values out + * of the punit to satisfy this requirement. + */ + for (level = 1; level <= max_level; level++) { + if (wm[level] == 0) { + for (i = level + 1; i <= max_level; i++) + wm[i] = 0; + + max_level = level - 1; + break; + } + } + + /* + * WaWmMemoryReadLatency + * + * punit doesn't take into account the read latency so we need + * to add proper adjustement to each valid level we retrieve + * from the punit when level 0 response data is 0us. + */ + if (wm[0] == 0) { + for (level = 0; level <= max_level; level++) + wm[level] += read_latency; + } + + /* + * WA Level-0 adjustment for 16GB DIMMs: SKL+ + * If we could not get dimm info enable this WA to prevent from + * any underrun. If not able to get Dimm info assume 16GB dimm + * to avoid any underrun. 
+ */ + if (wm_lv_0_adjust_needed) + wm[0] += 1; +} + +static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) +{ + struct intel_uncore *uncore = &i915->uncore; + int max_level = ilk_wm_max_level(i915); + u32 val; + + val = intel_uncore_read(uncore, MTL_LATENCY_LP0_LP1); + wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val); + wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val); + + val = intel_uncore_read(uncore, MTL_LATENCY_LP2_LP3); + wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val); + wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val); + + val = intel_uncore_read(uncore, MTL_LATENCY_LP4_LP5); + wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val); + wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val); + + adjust_wm_latency(i915, wm, max_level, 6); +} + +static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) +{ + int max_level = ilk_wm_max_level(i915); + int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2; + int mult = IS_DG2(i915) ? 2 : 1; + u32 val; + int ret; + + /* read the first set of memory latencies[0:3] */ + val = 0; /* data0 to be programmed to 0 for first set */ + ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL); + if (ret) { + drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret); + return; + } + + wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult; + wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult; + wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult; + wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult; + + /* read the second set of memory latencies[4:7] */ + val = 1; /* data0 to be programmed to 1 for second set */ + ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL); + if (ret) { + drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret); + return; + } + + wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult; + wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult; + wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult; + wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult; + + adjust_wm_latency(i915, wm, max_level, read_latency); +} + +static void skl_setup_wm_latency(struct drm_i915_private *i915) +{ + if (DISPLAY_VER(i915) >= 14) + mtl_read_wm_latency(i915, i915->display.wm.skl_latency); + else + skl_read_wm_latency(i915, i915->display.wm.skl_latency); + + intel_print_wm_latency(i915, "Gen9 Plane", i915->display.wm.skl_latency); +} + +static const struct intel_wm_funcs skl_wm_funcs = { + .compute_global_watermarks = skl_compute_wm, +}; + +void skl_wm_init(struct drm_i915_private *i915) +{ + intel_sagv_init(i915); + + skl_setup_wm_latency(i915); + + i915->display.funcs.wm = &skl_wm_funcs; +} + +static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj) +{ + struct intel_dbuf_state *dbuf_state; + + dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL); + if (!dbuf_state) + return NULL; + + return &dbuf_state->base; +} + +static void intel_dbuf_destroy_state(struct intel_global_obj *obj, + struct intel_global_state *state) +{ + kfree(state); +} + +static const struct intel_global_state_funcs intel_dbuf_funcs = { + .atomic_duplicate_state = intel_dbuf_duplicate_state, + .atomic_destroy_state = intel_dbuf_destroy_state, +}; + +struct intel_dbuf_state * +intel_atomic_get_dbuf_state(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct 
intel_global_state *dbuf_state; + + dbuf_state = intel_atomic_get_global_obj_state(state, &i915->display.dbuf.obj); + if (IS_ERR(dbuf_state)) + return ERR_CAST(dbuf_state); + + return to_intel_dbuf_state(dbuf_state); +} + +int intel_dbuf_init(struct drm_i915_private *i915) +{ + struct intel_dbuf_state *dbuf_state; + + dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL); + if (!dbuf_state) + return -ENOMEM; + + intel_atomic_global_obj_init(i915, &i915->display.dbuf.obj, + &dbuf_state->base, &intel_dbuf_funcs); + + return 0; +} + +/* + * Configure MBUS_CTL and the DBUF_CTL_S of each slice for the join_mbus state + * before updating the requested state of all DBUF slices. + */ +static void update_mbus_pre_enable(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + u32 mbus_ctl, dbuf_min_tracker_val; + enum dbuf_slice slice; + const struct intel_dbuf_state *dbuf_state = + intel_atomic_get_new_dbuf_state(state); + + if (!HAS_MBUS_JOINING(i915)) + return; + + /* + * TODO: Implement vblank synchronized MBUS joining changes. + * Must be properly coordinated with dbuf reprogramming. + */ + if (dbuf_state->joined_mbus) { + mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN | + MBUS_JOIN_PIPE_SELECT_NONE; + dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3); + } else { + mbus_ctl = MBUS_HASHING_MODE_2x2 | + MBUS_JOIN_PIPE_SELECT_NONE; + dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1); + } + + intel_de_rmw(i915, MBUS_CTL, + MBUS_HASHING_MODE_MASK | MBUS_JOIN | + MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl); + + for_each_dbuf_slice(i915, slice) + intel_de_rmw(i915, DBUF_CTL_S(slice), + DBUF_MIN_TRACKER_STATE_SERVICE_MASK, + dbuf_min_tracker_val); +} + +void intel_dbuf_pre_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_dbuf_state *new_dbuf_state = + intel_atomic_get_new_dbuf_state(state); + const struct intel_dbuf_state *old_dbuf_state = + intel_atomic_get_old_dbuf_state(state); + + if (!new_dbuf_state || + (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices && + new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)) + return; + + WARN_ON(!new_dbuf_state->base.changed); + + update_mbus_pre_enable(state); + gen9_dbuf_slices_update(i915, + old_dbuf_state->enabled_slices | + new_dbuf_state->enabled_slices); +} + +void intel_dbuf_post_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_dbuf_state *new_dbuf_state = + intel_atomic_get_new_dbuf_state(state); + const struct intel_dbuf_state *old_dbuf_state = + intel_atomic_get_old_dbuf_state(state); + + if (!new_dbuf_state || + (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices && + new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)) + return; + + WARN_ON(!new_dbuf_state->base.changed); + + gen9_dbuf_slices_update(i915, + new_dbuf_state->enabled_slices); +} + +static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes) +{ + switch (pipe) { + case PIPE_A: + return !(active_pipes & BIT(PIPE_D)); + case PIPE_D: + return !(active_pipes & BIT(PIPE_A)); + case PIPE_B: + return !(active_pipes & BIT(PIPE_C)); + case PIPE_C: + return !(active_pipes & BIT(PIPE_B)); + default: /* to suppress compiler warning */ + MISSING_CASE(pipe); + break; + } + + return false; +} + +void intel_mbus_dbox_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct 
intel_dbuf_state *new_dbuf_state, *old_dbuf_state; + const struct intel_crtc_state *new_crtc_state; + const struct intel_crtc *crtc; + u32 val = 0; + int i; + + if (DISPLAY_VER(i915) < 11) + return; + + new_dbuf_state = intel_atomic_get_new_dbuf_state(state); + old_dbuf_state = intel_atomic_get_old_dbuf_state(state); + if (!new_dbuf_state || + (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus && + new_dbuf_state->active_pipes == old_dbuf_state->active_pipes)) + return; + + if (DISPLAY_VER(i915) >= 14) + val |= MBUS_DBOX_I_CREDIT(2); + + if (DISPLAY_VER(i915) >= 12) { + val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16); + val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1); + val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN; + } + + if (DISPLAY_VER(i915) >= 14) + val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(12) : + MBUS_DBOX_A_CREDIT(8); + else if (IS_ALDERLAKE_P(i915)) + /* Wa_22010947358:adl-p */ + val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) : + MBUS_DBOX_A_CREDIT(4); + else + val |= MBUS_DBOX_A_CREDIT(2); + + if (DISPLAY_VER(i915) >= 14) { + val |= MBUS_DBOX_B_CREDIT(0xA); + } else if (IS_ALDERLAKE_P(i915)) { + val |= MBUS_DBOX_BW_CREDIT(2); + val |= MBUS_DBOX_B_CREDIT(8); + } else if (DISPLAY_VER(i915) >= 12) { + val |= MBUS_DBOX_BW_CREDIT(2); + val |= MBUS_DBOX_B_CREDIT(12); + } else { + val |= MBUS_DBOX_BW_CREDIT(1); + val |= MBUS_DBOX_B_CREDIT(8); + } + + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + u32 pipe_val = val; + + if (!new_crtc_state->hw.active) + continue; + + if (DISPLAY_VER(i915) >= 14) { + if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe, + new_dbuf_state->active_pipes)) + pipe_val |= MBUS_DBOX_BW_8CREDITS_MTL; + else + pipe_val |= MBUS_DBOX_BW_4CREDITS_MTL; + } + + intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), pipe_val); + } +} + +static int skl_watermark_ipc_status_show(struct seq_file *m, void *data) +{ + struct drm_i915_private *i915 = m->private; + + seq_printf(m, "Isochronous Priority Control: %s\n", + str_yes_no(skl_watermark_ipc_enabled(i915))); + return 0; +} + +static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file) +{ + struct drm_i915_private *i915 = inode->i_private; + + return single_open(file, skl_watermark_ipc_status_show, i915); +} + +static ssize_t skl_watermark_ipc_status_write(struct file *file, + const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_i915_private *i915 = m->private; + intel_wakeref_t wakeref; + bool enable; + int ret; + + ret = kstrtobool_from_user(ubuf, len, &enable); + if (ret < 0) + return ret; + + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + if (!skl_watermark_ipc_enabled(i915) && enable) + drm_info(&i915->drm, + "Enabling IPC: WM will be proper only after next commit\n"); + i915->display.wm.ipc_enabled = enable; + skl_watermark_ipc_update(i915); + } + + return len; +} + +static const struct file_operations skl_watermark_ipc_status_fops = { + .owner = THIS_MODULE, + .open = skl_watermark_ipc_status_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = skl_watermark_ipc_status_write +}; + +void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915) +{ + struct drm_minor *minor = i915->drm.primary; + + if (!HAS_IPC(i915)) + return; + + debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915, + &skl_watermark_ipc_status_fops); +} diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h new 
file mode 100644 index 000000000000..7a5a4e67cd73 --- /dev/null +++ b/drivers/gpu/drm/i915/display/skl_watermark.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __SKL_WATERMARK_H__ +#define __SKL_WATERMARK_H__ + +#include <linux/types.h> + +#include "intel_display.h" +#include "intel_global_state.h" +#include "intel_pm_types.h" + +struct drm_i915_private; +struct intel_atomic_state; +struct intel_bw_state; +struct intel_crtc; +struct intel_crtc_state; +struct intel_plane; + +u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915); + +void intel_sagv_pre_plane_update(struct intel_atomic_state *state); +void intel_sagv_post_plane_update(struct intel_atomic_state *state); +bool intel_can_enable_sagv(struct drm_i915_private *i915, + const struct intel_bw_state *bw_state); + +u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915, + const struct skl_ddb_entry *entry); + +void skl_write_plane_wm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state); +void skl_write_cursor_wm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state); + +bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, + const struct skl_ddb_entry *entries, + int num_entries, int ignore_idx); + +void skl_wm_get_hw_state(struct drm_i915_private *i915); +void skl_wm_sanitize(struct drm_i915_private *i915); + +void intel_wm_state_verify(struct intel_crtc *crtc, + struct intel_crtc_state *new_crtc_state); + +void skl_watermark_ipc_init(struct drm_i915_private *i915); +void skl_watermark_ipc_update(struct drm_i915_private *i915); +bool skl_watermark_ipc_enabled(struct drm_i915_private *i915); +void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915); + +void skl_wm_init(struct drm_i915_private *i915); + +struct intel_dbuf_state { + struct intel_global_state base; + + struct skl_ddb_entry ddb[I915_MAX_PIPES]; + unsigned int weight[I915_MAX_PIPES]; + u8 slices[I915_MAX_PIPES]; + u8 enabled_slices; + u8 active_pipes; + bool joined_mbus; +}; + +struct intel_dbuf_state * +intel_atomic_get_dbuf_state(struct intel_atomic_state *state); + +#define to_intel_dbuf_state(x) container_of((x), struct intel_dbuf_state, base) +#define intel_atomic_get_old_dbuf_state(state) \ + to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj)) +#define intel_atomic_get_new_dbuf_state(state) \ + to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj)) + +int intel_dbuf_init(struct drm_i915_private *i915); +void intel_dbuf_pre_plane_update(struct intel_atomic_state *state); +void intel_dbuf_post_plane_update(struct intel_atomic_state *state); +void intel_mbus_dbox_update(struct intel_atomic_state *state); + +#endif /* __SKL_WATERMARK_H__ */ + diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index b9b1fed99874..b3f5ca280ef2 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -822,9 +822,9 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, u32 val; /* Disable DPOunit clock gating, can stall pipe */ - val = intel_de_read(dev_priv, DSPCLK_GATE_D); + val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv)); val |= DPOUNIT_CLOCK_GATE_DISABLE; - intel_de_write(dev_priv, DSPCLK_GATE_D, val); + intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val); } if (!IS_GEMINILAKE(dev_priv)) @@ -998,9 +998,9 @@ static void 
intel_dsi_post_disable(struct intel_atomic_state *state, vlv_dsi_pll_disable(encoder); - val = intel_de_read(dev_priv, DSPCLK_GATE_D); + val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv)); val &= ~DPOUNIT_CLOCK_GATE_DISABLE; - intel_de_write(dev_priv, DSPCLK_GATE_D, val); + intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val); } /* Assert reset */ @@ -1277,13 +1277,12 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, pclk = vlv_dsi_get_pclk(encoder, pipe_config); } - if (intel_dsi->dual_link) - pclk *= 2; + pipe_config->port_clock = pclk; - if (pclk) { - pipe_config->hw.adjusted_mode.crtc_clock = pclk; - pipe_config->port_clock = pclk; - } + /* FIXME definitely not right for burst/cmd mode/pixel overlap */ + pipe_config->hw.adjusted_mode.crtc_clock = pclk; + if (intel_dsi->dual_link) + pipe_config->hw.adjusted_mode.crtc_clock *= 2; } /* return txclkesc cycles in terms of divider and duration in us */ @@ -1872,9 +1871,9 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) return; if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - dev_priv->mipi_mmio_base = BXT_MIPI_BASE; + dev_priv->display.dsi.mmio_base = BXT_MIPI_BASE; else - dev_priv->mipi_mmio_base = VLV_MIPI_BASE; + dev_priv->display.dsi.mmio_base = VLV_MIPI_BASE; intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL); if (!intel_dsi) @@ -1933,8 +1932,11 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) else intel_dsi->ports = BIT(port); - intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports; - intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports; + if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports)) + intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports; + + if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports)) + intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports; /* Create a DSI host (and a device) for each port. */ for_each_dsi_port(port, intel_dsi->ports) { diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index 5894b0138343..af7402127cd9 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -113,6 +113,61 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv, return 0; } +static int vlv_dsi_pclk(struct intel_encoder *encoder, + struct intel_crtc_state *config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); + u32 dsi_clock; + u32 pll_ctl, pll_div; + u32 m = 0, p = 0, n; + int refclk = IS_CHERRYVIEW(dev_priv) ? 
100000 : 25000; + int i; + + pll_ctl = config->dsi_pll.ctrl; + pll_div = config->dsi_pll.div; + + /* mask out other bits and extract the P1 divisor */ + pll_ctl &= DSI_PLL_P1_POST_DIV_MASK; + pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2); + + /* N1 divisor */ + n = (pll_div & DSI_PLL_N1_DIV_MASK) >> DSI_PLL_N1_DIV_SHIFT; + n = 1 << n; /* register has log2(N1) */ + + /* mask out the other bits and extract the M1 divisor */ + pll_div &= DSI_PLL_M1_DIV_MASK; + pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT; + + while (pll_ctl) { + pll_ctl = pll_ctl >> 1; + p++; + } + p--; + + if (!p) { + drm_err(&dev_priv->drm, "wrong P1 divisor\n"); + return 0; + } + + for (i = 0; i < ARRAY_SIZE(lfsr_converts); i++) { + if (lfsr_converts[i] == pll_div) + break; + } + + if (i == ARRAY_SIZE(lfsr_converts)) { + drm_err(&dev_priv->drm, "wrong m_seed programmed\n"); + return 0; + } + + m = i + 62; + + dsi_clock = (m * refclk) / (p * n); + + return DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, bpp); +} + /* * XXX: The muxing and gating is hard coded for now. Need to add support for * sharing PLLs with two DSI outputs. @@ -122,8 +177,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - int ret; - u32 dsi_clk; + int pclk, dsi_clk, ret; dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format, intel_dsi->lane_count); @@ -145,6 +199,14 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder, drm_dbg_kms(&dev_priv->drm, "dsi pll div %08x, ctrl %08x\n", config->dsi_pll.div, config->dsi_pll.ctrl); + pclk = vlv_dsi_pclk(encoder, config); + config->port_clock = pclk; + + /* FIXME definitely not right for burst/cmd mode/pixel overlap */ + config->hw.adjusted_mode.crtc_clock = pclk; + if (intel_dsi->dual_link) + config->hw.adjusted_mode.crtc_clock *= 2; + return 0; } @@ -262,13 +324,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); - u32 dsi_clock, pclk; u32 pll_ctl, pll_div; - u32 m = 0, p = 0, n; - int refclk = IS_CHERRYVIEW(dev_priv) ? 
100000 : 25000; - int i; drm_dbg_kms(&dev_priv->drm, "\n"); @@ -280,65 +336,31 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, config->dsi_pll.ctrl = pll_ctl & ~DSI_PLL_LOCK; config->dsi_pll.div = pll_div; - /* mask out other bits and extract the P1 divisor */ - pll_ctl &= DSI_PLL_P1_POST_DIV_MASK; - pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2); - - /* N1 divisor */ - n = (pll_div & DSI_PLL_N1_DIV_MASK) >> DSI_PLL_N1_DIV_SHIFT; - n = 1 << n; /* register has log2(N1) */ - - /* mask out the other bits and extract the M1 divisor */ - pll_div &= DSI_PLL_M1_DIV_MASK; - pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT; - - while (pll_ctl) { - pll_ctl = pll_ctl >> 1; - p++; - } - p--; - - if (!p) { - drm_err(&dev_priv->drm, "wrong P1 divisor\n"); - return 0; - } - - for (i = 0; i < ARRAY_SIZE(lfsr_converts); i++) { - if (lfsr_converts[i] == pll_div) - break; - } - - if (i == ARRAY_SIZE(lfsr_converts)) { - drm_err(&dev_priv->drm, "wrong m_seed programmed\n"); - return 0; - } - - m = i + 62; + return vlv_dsi_pclk(encoder, config); +} - dsi_clock = (m * refclk) / (p * n); +static int bxt_dsi_pclk(struct intel_encoder *encoder, + const struct intel_crtc_state *config) +{ + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); + u32 dsi_ratio, dsi_clk; - pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, bpp); + dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK; + dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2; - return pclk; + return DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp); } u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config) { - u32 pclk; - u32 dsi_clk; - u32 dsi_ratio; - struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); + u32 pclk; config->dsi_pll.ctrl = intel_de_read(dev_priv, BXT_DSI_PLL_CTL); - dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK; - - dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2; - - pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp); + pclk = bxt_dsi_pclk(encoder, config); drm_dbg(&dev_priv->drm, "Calculated pclk=%u\n", pclk); return pclk; @@ -463,6 +485,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder, struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max; u32 dsi_clk; + int pclk; dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format, intel_dsi->lane_count); @@ -502,6 +525,14 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder, if (IS_BROXTON(dev_priv) && dsi_ratio <= 50) config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1; + pclk = bxt_dsi_pclk(encoder, config); + config->port_clock = pclk; + + /* FIXME definitely not right for burst/cmd mode/pixel overlap */ + config->hw.adjusted_mode.crtc_clock = pclk; + if (intel_dsi->dual_link) + config->hw.adjusted_mode.crtc_clock *= 2; + return 0; } diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_regs.h b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h index 356e51515346..e065b8f2ee08 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_regs.h +++ b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h @@ -11,6 +11,8 @@ #define VLV_MIPI_BASE VLV_DISPLAY_BASE #define BXT_MIPI_BASE 0x60000 +#define _MIPI_MMIO_BASE(__i915) ((__i915)->display.dsi.mmio_base) + #define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? 
a : c) /* ports A and C only */ #define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c)) @@ -96,8 +98,8 @@ /* MIPI DSI Controller and D-PHY registers */ -#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000) -#define _MIPIC_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800) +#define _MIPIA_DEVICE_READY (_MIPI_MMIO_BASE(dev_priv) + 0xb000) +#define _MIPIC_DEVICE_READY (_MIPI_MMIO_BASE(dev_priv) + 0xb800) #define MIPI_DEVICE_READY(port) _MMIO_MIPI(port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY) #define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */ #define ULPS_STATE_MASK (3 << 1) @@ -106,11 +108,11 @@ #define ULPS_STATE_NORMAL_OPERATION (0 << 1) #define DEVICE_READY (1 << 0) -#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004) -#define _MIPIC_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804) +#define _MIPIA_INTR_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb004) +#define _MIPIC_INTR_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb804) #define MIPI_INTR_STAT(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT) -#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008) -#define _MIPIC_INTR_EN (dev_priv->mipi_mmio_base + 0xb808) +#define _MIPIA_INTR_EN (_MIPI_MMIO_BASE(dev_priv) + 0xb008) +#define _MIPIC_INTR_EN (_MIPI_MMIO_BASE(dev_priv) + 0xb808) #define MIPI_INTR_EN(port) _MMIO_MIPI(port, _MIPIA_INTR_EN, _MIPIC_INTR_EN) #define TEARING_EFFECT (1 << 31) #define SPL_PKT_SENT_INTERRUPT (1 << 30) @@ -145,8 +147,8 @@ #define RXSOT_SYNC_ERROR (1 << 1) #define RXSOT_ERROR (1 << 0) -#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c) -#define _MIPIC_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c) +#define _MIPIA_DSI_FUNC_PRG (_MIPI_MMIO_BASE(dev_priv) + 0xb00c) +#define _MIPIC_DSI_FUNC_PRG (_MIPI_MMIO_BASE(dev_priv) + 0xb80c) #define MIPI_DSI_FUNC_PRG(port) _MMIO_MIPI(port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG) #define CMD_MODE_DATA_WIDTH_MASK (7 << 13) #define CMD_MODE_NOT_SUPPORTED (0 << 13) @@ -168,76 +170,76 @@ #define DATA_LANES_PRG_REG_SHIFT 0 #define DATA_LANES_PRG_REG_MASK (7 << 0) -#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010) -#define _MIPIC_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810) +#define _MIPIA_HS_TX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb010) +#define _MIPIC_HS_TX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb810) #define MIPI_HS_TX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT) #define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff -#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014) -#define _MIPIC_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814) +#define _MIPIA_LP_RX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb014) +#define _MIPIC_LP_RX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb814) #define MIPI_LP_RX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT) #define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff -#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018) -#define _MIPIC_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818) +#define _MIPIA_TURN_AROUND_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb018) +#define _MIPIC_TURN_AROUND_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb818) #define MIPI_TURN_AROUND_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT) #define TURN_AROUND_TIMEOUT_MASK 0x3f -#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c) -#define _MIPIC_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c) +#define _MIPIA_DEVICE_RESET_TIMER (_MIPI_MMIO_BASE(dev_priv) + 0xb01c) +#define 
_MIPIC_DEVICE_RESET_TIMER (_MIPI_MMIO_BASE(dev_priv) + 0xb81c) #define MIPI_DEVICE_RESET_TIMER(port) _MMIO_MIPI(port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER) #define DEVICE_RESET_TIMER_MASK 0xffff -#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020) -#define _MIPIC_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820) +#define _MIPIA_DPI_RESOLUTION (_MIPI_MMIO_BASE(dev_priv) + 0xb020) +#define _MIPIC_DPI_RESOLUTION (_MIPI_MMIO_BASE(dev_priv) + 0xb820) #define MIPI_DPI_RESOLUTION(port) _MMIO_MIPI(port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION) #define VERTICAL_ADDRESS_SHIFT 16 #define VERTICAL_ADDRESS_MASK (0xffff << 16) #define HORIZONTAL_ADDRESS_SHIFT 0 #define HORIZONTAL_ADDRESS_MASK 0xffff -#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024) -#define _MIPIC_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824) +#define _MIPIA_DBI_FIFO_THROTTLE (_MIPI_MMIO_BASE(dev_priv) + 0xb024) +#define _MIPIC_DBI_FIFO_THROTTLE (_MIPI_MMIO_BASE(dev_priv) + 0xb824) #define MIPI_DBI_FIFO_THROTTLE(port) _MMIO_MIPI(port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE) #define DBI_FIFO_EMPTY_HALF (0 << 0) #define DBI_FIFO_EMPTY_QUARTER (1 << 0) #define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0) /* regs below are bits 15:0 */ -#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028) -#define _MIPIC_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828) +#define _MIPIA_HSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb028) +#define _MIPIC_HSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb828) #define MIPI_HSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT) -#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c) -#define _MIPIC_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c) +#define _MIPIA_HBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb02c) +#define _MIPIC_HBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb82c) #define MIPI_HBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT) -#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030) -#define _MIPIC_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830) +#define _MIPIA_HFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb030) +#define _MIPIC_HFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb830) #define MIPI_HFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT) -#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034) -#define _MIPIC_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834) +#define _MIPIA_HACTIVE_AREA_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb034) +#define _MIPIC_HACTIVE_AREA_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb834) #define MIPI_HACTIVE_AREA_COUNT(port) _MMIO_MIPI(port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT) -#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038) -#define _MIPIC_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838) +#define _MIPIA_VSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb038) +#define _MIPIC_VSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb838) #define MIPI_VSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT) -#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c) -#define _MIPIC_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c) +#define _MIPIA_VBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb03c) +#define _MIPIC_VBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb83c) #define MIPI_VBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT) -#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040) -#define _MIPIC_VFP_COUNT 
(dev_priv->mipi_mmio_base + 0xb840) +#define _MIPIA_VFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb040) +#define _MIPIC_VFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb840) #define MIPI_VFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT) -#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044) -#define _MIPIC_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844) +#define _MIPIA_HIGH_LOW_SWITCH_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb044) +#define _MIPIC_HIGH_LOW_SWITCH_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb844) #define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MMIO_MIPI(port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT) -#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048) -#define _MIPIC_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848) +#define _MIPIA_DPI_CONTROL (_MIPI_MMIO_BASE(dev_priv) + 0xb048) +#define _MIPIC_DPI_CONTROL (_MIPI_MMIO_BASE(dev_priv) + 0xb848) #define MIPI_DPI_CONTROL(port) _MMIO_MIPI(port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL) #define DPI_LP_MODE (1 << 6) #define BACKLIGHT_OFF (1 << 5) @@ -247,27 +249,27 @@ #define TURN_ON (1 << 1) #define SHUTDOWN (1 << 0) -#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c) -#define _MIPIC_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c) +#define _MIPIA_DPI_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb04c) +#define _MIPIC_DPI_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb84c) #define MIPI_DPI_DATA(port) _MMIO_MIPI(port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA) #define COMMAND_BYTE_SHIFT 0 #define COMMAND_BYTE_MASK (0x3f << 0) -#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050) -#define _MIPIC_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850) +#define _MIPIA_INIT_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb050) +#define _MIPIC_INIT_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb850) #define MIPI_INIT_COUNT(port) _MMIO_MIPI(port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT) #define MASTER_INIT_TIMER_SHIFT 0 #define MASTER_INIT_TIMER_MASK (0xffff << 0) -#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054) -#define _MIPIC_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854) +#define _MIPIA_MAX_RETURN_PKT_SIZE (_MIPI_MMIO_BASE(dev_priv) + 0xb054) +#define _MIPIC_MAX_RETURN_PKT_SIZE (_MIPI_MMIO_BASE(dev_priv) + 0xb854) #define MIPI_MAX_RETURN_PKT_SIZE(port) _MMIO_MIPI(port, \ _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE) #define MAX_RETURN_PKT_SIZE_SHIFT 0 #define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0) -#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058) -#define _MIPIC_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858) +#define _MIPIA_VIDEO_MODE_FORMAT (_MIPI_MMIO_BASE(dev_priv) + 0xb058) +#define _MIPIC_VIDEO_MODE_FORMAT (_MIPI_MMIO_BASE(dev_priv) + 0xb858) #define MIPI_VIDEO_MODE_FORMAT(port) _MMIO_MIPI(port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT) #define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4) #define DISABLE_VIDEO_BTA (1 << 3) @@ -276,8 +278,8 @@ #define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0) #define VIDEO_MODE_BURST (3 << 0) -#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c) -#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c) +#define _MIPIA_EOT_DISABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb05c) +#define _MIPIC_EOT_DISABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb85c) #define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE) #define BXT_DEFEATURE_DPI_FIFO_CTR (1 << 9) #define BXT_DPHY_DEFEATURE_EN (1 << 8) @@ -290,35 +292,35 @@ #define CLOCKSTOP (1 << 1) #define EOT_DISABLE (1 << 0) -#define _MIPIA_LP_BYTECLK 
(dev_priv->mipi_mmio_base + 0xb060) -#define _MIPIC_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860) +#define _MIPIA_LP_BYTECLK (_MIPI_MMIO_BASE(dev_priv) + 0xb060) +#define _MIPIC_LP_BYTECLK (_MIPI_MMIO_BASE(dev_priv) + 0xb860) #define MIPI_LP_BYTECLK(port) _MMIO_MIPI(port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK) #define LP_BYTECLK_SHIFT 0 #define LP_BYTECLK_MASK (0xffff << 0) -#define _MIPIA_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb0a4) -#define _MIPIC_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb8a4) +#define _MIPIA_TLPX_TIME_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb0a4) +#define _MIPIC_TLPX_TIME_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb8a4) #define MIPI_TLPX_TIME_COUNT(port) _MMIO_MIPI(port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT) -#define _MIPIA_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb098) -#define _MIPIC_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb898) +#define _MIPIA_CLK_LANE_TIMING (_MIPI_MMIO_BASE(dev_priv) + 0xb098) +#define _MIPIC_CLK_LANE_TIMING (_MIPI_MMIO_BASE(dev_priv) + 0xb898) #define MIPI_CLK_LANE_TIMING(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING) /* bits 31:0 */ -#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064) -#define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864) +#define _MIPIA_LP_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb064) +#define _MIPIC_LP_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb864) #define MIPI_LP_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA) /* bits 31:0 */ -#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068) -#define _MIPIC_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868) +#define _MIPIA_HS_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb068) +#define _MIPIC_HS_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb868) #define MIPI_HS_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA) -#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c) -#define _MIPIC_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c) +#define _MIPIA_LP_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb06c) +#define _MIPIC_LP_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb86c) #define MIPI_LP_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL) -#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070) -#define _MIPIC_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870) +#define _MIPIA_HS_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb070) +#define _MIPIC_HS_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb870) #define MIPI_HS_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL) #define LONG_PACKET_WORD_COUNT_SHIFT 8 #define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8) @@ -330,8 +332,8 @@ #define DATA_TYPE_MASK (0x3f << 0) /* data type values, see include/video/mipi_display.h */ -#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074) -#define _MIPIC_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874) +#define _MIPIA_GEN_FIFO_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb074) +#define _MIPIC_GEN_FIFO_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb874) #define MIPI_GEN_FIFO_STAT(port) _MMIO_MIPI(port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT) #define DPI_FIFO_EMPTY (1 << 28) #define DBI_FIFO_EMPTY (1 << 27) @@ -348,15 +350,15 @@ #define HS_DATA_FIFO_HALF_EMPTY (1 << 1) #define HS_DATA_FIFO_FULL (1 << 0) -#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078) -#define _MIPIC_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878) +#define _MIPIA_HS_LS_DBI_ENABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb078) +#define _MIPIC_HS_LS_DBI_ENABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb878) #define 
MIPI_HS_LP_DBI_ENABLE(port) _MMIO_MIPI(port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE) #define DBI_HS_LP_MODE_MASK (1 << 0) #define DBI_LP_MODE (1 << 0) #define DBI_HS_MODE (0 << 0) -#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080) -#define _MIPIC_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880) +#define _MIPIA_DPHY_PARAM (_MIPI_MMIO_BASE(dev_priv) + 0xb080) +#define _MIPIC_DPHY_PARAM (_MIPI_MMIO_BASE(dev_priv) + 0xb880) #define MIPI_DPHY_PARAM(port) _MMIO_MIPI(port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM) #define EXIT_ZERO_COUNT_SHIFT 24 #define EXIT_ZERO_COUNT_MASK (0x3f << 24) @@ -367,34 +369,34 @@ #define PREPARE_COUNT_SHIFT 0 #define PREPARE_COUNT_MASK (0x3f << 0) -#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084) -#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884) +#define _MIPIA_DBI_BW_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb084) +#define _MIPIC_DBI_BW_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb884) #define MIPI_DBI_BW_CTRL(port) _MMIO_MIPI(port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL) -#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb088) -#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb888) +#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (_MIPI_MMIO_BASE(dev_priv) + 0xb088) +#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (_MIPI_MMIO_BASE(dev_priv) + 0xb888) #define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT) #define LP_HS_SSW_CNT_SHIFT 16 #define LP_HS_SSW_CNT_MASK (0xffff << 16) #define HS_LP_PWR_SW_CNT_SHIFT 0 #define HS_LP_PWR_SW_CNT_MASK (0xffff << 0) -#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c) -#define _MIPIC_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c) +#define _MIPIA_STOP_STATE_STALL (_MIPI_MMIO_BASE(dev_priv) + 0xb08c) +#define _MIPIC_STOP_STATE_STALL (_MIPI_MMIO_BASE(dev_priv) + 0xb88c) #define MIPI_STOP_STATE_STALL(port) _MMIO_MIPI(port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL) #define STOP_STATE_STALL_COUNTER_SHIFT 0 #define STOP_STATE_STALL_COUNTER_MASK (0xff << 0) -#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090) -#define _MIPIC_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890) +#define _MIPIA_INTR_STAT_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb090) +#define _MIPIC_INTR_STAT_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb890) #define MIPI_INTR_STAT_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1) -#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094) -#define _MIPIC_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894) +#define _MIPIA_INTR_EN_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb094) +#define _MIPIC_INTR_EN_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb894) #define MIPI_INTR_EN_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1) #define RX_CONTENTION_DETECTED (1 << 0) /* XXX: only pipe A ?!? 
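All of the register conversions in this header follow one mechanical pattern: each MIPI register exists at base + 0xb0XX for port A and base + 0xb8XX for port C, and the base address now comes from the per-device structure instead of a compile-time constant, so Broxton/Geminilake and VLV/CHV parts can point the same macros at different MMIO blocks. A condensed, hypothetical illustration of the shape (all names here are invented for the sketch, not the real i915 macros):

enum sketch_port { SKETCH_PORT_A, SKETCH_PORT_C };

/* the base is read from the device at runtime, e.g. 0x60000 on BXT */
#define SKETCH_MIPI_A(base, off)	((base) + 0xb000 + (off))
#define SKETCH_MIPI_C(base, off)	((base) + 0xb800 + (off))
#define SKETCH_MIPI_REG(base, port, off) \
	((port) == SKETCH_PORT_A ? SKETCH_MIPI_A(base, off) : \
				   SKETCH_MIPI_C(base, off))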
*/ -#define MIPIA_DBI_TYPEC_CTRL (dev_priv->mipi_mmio_base + 0xb100) +#define MIPIA_DBI_TYPEC_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb100) #define DBI_TYPEC_ENABLE (1 << 31) #define DBI_TYPEC_WIP (1 << 30) #define DBI_TYPEC_OPTION_SHIFT 28 @@ -407,8 +409,8 @@ /* MIPI adapter registers */ -#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104) -#define _MIPIC_CTRL (dev_priv->mipi_mmio_base + 0xb904) +#define _MIPIA_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb104) +#define _MIPIC_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb904) #define MIPI_CTRL(port) _MMIO_MIPI(port, _MIPIA_CTRL, _MIPIC_CTRL) #define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */ #define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5) @@ -440,21 +442,21 @@ #define GLK_MIPIIO_PORT_POWERED (1 << 1) /* RO */ #define GLK_MIPIIO_ENABLE (1 << 0) -#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108) -#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908) +#define _MIPIA_DATA_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb108) +#define _MIPIC_DATA_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb908) #define MIPI_DATA_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS) #define DATA_MEM_ADDRESS_SHIFT 5 #define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5) #define DATA_VALID (1 << 0) -#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c) -#define _MIPIC_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c) +#define _MIPIA_DATA_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb10c) +#define _MIPIC_DATA_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb90c) #define MIPI_DATA_LENGTH(port) _MMIO_MIPI(port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH) #define DATA_LENGTH_SHIFT 0 #define DATA_LENGTH_MASK (0xfffff << 0) -#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110) -#define _MIPIC_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910) +#define _MIPIA_COMMAND_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb110) +#define _MIPIC_COMMAND_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb910) #define MIPI_COMMAND_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS) #define COMMAND_MEM_ADDRESS_SHIFT 5 #define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5) @@ -462,18 +464,18 @@ #define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1) #define COMMAND_VALID (1 << 0) -#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114) -#define _MIPIC_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914) +#define _MIPIA_COMMAND_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb114) +#define _MIPIC_COMMAND_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb914) #define MIPI_COMMAND_LENGTH(port) _MMIO_MIPI(port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH) #define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */ #define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n))) -#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118) -#define _MIPIC_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918) +#define _MIPIA_READ_DATA_RETURN0 (_MIPI_MMIO_BASE(dev_priv) + 0xb118) +#define _MIPIC_READ_DATA_RETURN0 (_MIPI_MMIO_BASE(dev_priv) + 0xb918) #define MIPI_READ_DATA_RETURN(port, n) _MMIO(_MIPI(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */ -#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138) -#define _MIPIC_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938) +#define _MIPIA_READ_DATA_VALID (_MIPI_MMIO_BASE(dev_priv) + 0xb138) +#define _MIPIC_READ_DATA_VALID (_MIPI_MMIO_BASE(dev_priv) + 0xb938) #define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID) #define READ_DATA_VALID(n) (1 << (n)) diff --git 
a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h index 1b88ea13435c..5a7a14e85c3f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h @@ -12,8 +12,6 @@ struct drm_i915_private; struct drm_i915_gem_object; struct intel_memory_region; -extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops; - void __iomem * i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj, unsigned long n, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 3218981488cc..73d9eda1d6b7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -413,7 +413,7 @@ retry: vma->mmo = mmo; if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) - intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref, + intel_wakeref_auto(&to_gt(i915)->userfault_wakeref, msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); if (write) { @@ -550,6 +550,20 @@ out: intel_runtime_pm_put(&i915->runtime_pm, wakeref); } +void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj) +{ + struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + struct ttm_device *bdev = bo->bdev; + + drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping); + + if (obj->userfault_count) { + /* rpm wakeref provides exclusive access */ + list_del(&obj->userfault_link); + obj->userfault_count = 0; + } +} + void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj) { struct i915_mmap_offset *mmo, *mn; @@ -573,6 +587,13 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj) spin_lock(&obj->mmo.lock); } spin_unlock(&obj->mmo.lock); + + if (obj->userfault_count) { + mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock); + list_del(&obj->userfault_link); + mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock); + obj->userfault_count = 0; + } } static struct i915_mmap_offset * diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h index efee9e0d2508..1fa91b3033b3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h @@ -27,6 +27,7 @@ int i915_gem_dumb_mmap_offset(struct drm_file *file_priv, void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj); void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj); +void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj); void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj); #endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 85482a04d158..7ff9c7877bec 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -238,7 +238,7 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj) { /* Skip serialisation and waking the device if known to be not used. 
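The mmap bookkeeping added here follows a simple pairing: the fault handler puts an object on a per-GT userfault list the first time it is touched through a fake-offset mmap (userfault_count goes 0 to 1 under the list mutex), and every release path must take it back off and clear the count before the mapping disappears. A minimal sketch of that protocol (locking deliberately simplified; the real code additionally relies on dma_resv locking and runtime-PM exclusion, and the names below are illustrative):

#include <linux/list.h>
#include <linux/mutex.h>

struct uf_object {
	unsigned int userfault_count;
	struct list_head userfault_link;
};

struct uf_tracker {
	struct mutex lock;
	struct list_head list;
};

/* called from the fault handler on first CPU access */
static void uf_track(struct uf_tracker *t, struct uf_object *obj)
{
	if (obj->userfault_count)
		return;	/* already on the list */

	obj->userfault_count = 1;
	mutex_lock(&t->lock);
	list_add(&obj->userfault_link, &t->list);
	mutex_unlock(&t->lock);
}

/* called from every path that tears the mapping down */
static void uf_untrack(struct uf_tracker *t, struct uf_object *obj)
{
	if (!obj->userfault_count)
		return;

	mutex_lock(&t->lock);
	list_del(&obj->userfault_link);
	mutex_unlock(&t->lock);
	obj->userfault_count = 0;
}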
*/ - if (obj->userfault_count) + if (obj->userfault_count && !IS_DGFX(to_i915(obj->base.dev))) i915_gem_object_release_mmap_gtt(obj); if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 9f6b14ec189a..40305e2bcd49 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -298,7 +298,8 @@ struct drm_i915_gem_object { }; /** - * Whether the object is currently in the GGTT mmap. + * Whether the object is currently in the GGTT or any other supported + * fake offset mmap backed by lmem. */ unsigned int userfault_count; struct list_head userfault_link; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 8357dbdcab5c..4df50b049cea 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -20,7 +20,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, unsigned int sg_page_sizes) { struct drm_i915_private *i915 = to_i915(obj->base.dev); - unsigned long supported = INTEL_INFO(i915)->page_sizes; + unsigned long supported = RUNTIME_INFO(i915)->page_sizes; bool shrinkable; int i; @@ -66,7 +66,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, shrinkable = i915_gem_object_is_shrinkable(obj); if (i915_gem_object_is_tiled(obj) && - i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { + i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) { GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj)); i915_gem_object_set_tiling_quirk(obj); GEM_BUG_ON(!list_empty(&obj->mm.link)); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 00359ec9d58b..3428f735e786 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -24,7 +24,7 @@ void i915_gem_suspend(struct drm_i915_private *i915) { GEM_TRACE("%s\n", dev_name(i915->drm.dev)); - intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref, 0); + intel_wakeref_auto(&to_gt(i915)->userfault_wakeref, 0); flush_workqueue(i915->wq); /* diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 166d0a4b9e8c..acc561c0f0aa 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -18,10 +18,12 @@ #include "gt/intel_region_lmem.h" #include "i915_drv.h" #include "i915_gem_stolen.h" +#include "i915_pci.h" #include "i915_reg.h" #include "i915_utils.h" #include "i915_vgpu.h" #include "intel_mchbar_regs.h" +#include "intel_pci_config.h" /* * The BIOS typically reserves some of the system's memory for the exclusive @@ -428,48 +430,29 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem) reserved_base = stolen_top; reserved_size = 0; - switch (GRAPHICS_VER(i915)) { - case 2: - case 3: - break; - case 4: - if (!IS_G4X(i915)) - break; - fallthrough; - case 5: - g4x_get_stolen_reserved(i915, uncore, + if (GRAPHICS_VER(i915) >= 11) { + icl_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); - break; - case 6: - gen6_get_stolen_reserved(i915, uncore, - &reserved_base, &reserved_size); - break; - case 7: - if (IS_VALLEYVIEW(i915)) - vlv_get_stolen_reserved(i915, uncore, - &reserved_base, &reserved_size); - else - gen7_get_stolen_reserved(i915, uncore, - &reserved_base, &reserved_size); - break; - case 8: - case 9: + } else if (GRAPHICS_VER(i915) >= 8) { if (IS_LP(i915)) 
chv_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); else bdw_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); - break; - default: - MISSING_CASE(GRAPHICS_VER(i915)); - fallthrough; - case 11: - case 12: - icl_get_stolen_reserved(i915, uncore, - &reserved_base, - &reserved_size); - break; + } else if (GRAPHICS_VER(i915) >= 7) { + if (IS_VALLEYVIEW(i915)) + vlv_get_stolen_reserved(i915, uncore, + &reserved_base, &reserved_size); + else + gen7_get_stolen_reserved(i915, uncore, + &reserved_base, &reserved_size); + } else if (GRAPHICS_VER(i915) >= 6) { + gen6_get_stolen_reserved(i915, uncore, + &reserved_base, &reserved_size); + } else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) { + g4x_get_stolen_reserved(i915, uncore, + &reserved_base, &reserved_size); } /* @@ -827,10 +810,13 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type, if (WARN_ON_ONCE(instance)) return ERR_PTR(-ENODEV); + if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR)) + return ERR_PTR(-ENXIO); + /* Use DSM base address instead for stolen memory */ dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE); if (IS_DG1(uncore->i915)) { - lmem_size = pci_resource_len(pdev, 2); + lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR); if (WARN_ON(lmem_size < dsm_base)) return ERR_PTR(-ENODEV); } else { @@ -842,11 +828,11 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type, } dsm_size = lmem_size - dsm_base; - if (pci_resource_len(pdev, 2) < lmem_size) { + if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) { io_start = 0; io_size = 0; } else { - io_start = pci_resource_start(pdev, 2) + dsm_base; + io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base; io_size = dsm_size; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c index 85518b28cd72..fd42b89b7162 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c @@ -278,7 +278,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, */ if (i915_gem_object_has_pages(obj) && obj->mm.madv == I915_MADV_WILLNEED && - i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { + i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) { if (tiling == I915_TILING_NONE) { GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj)); i915_gem_object_clear_tiling_quirk(obj); @@ -458,7 +458,7 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data, } /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ - if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) + if (dev_priv->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN; else args->phys_swizzle_mode = args->swizzle_mode; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index f64a3deb12fc..0544b0a4a43a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -509,9 +509,18 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags) static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + intel_wakeref_t wakeref = 0; if (likely(obj)) { + /* ttm_bo_release() already has dma_resv_lock */ + if (i915_ttm_cpu_maps_iomem(bo->resource)) + wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm); + __i915_gem_object_pages_fini(obj); + + if (wakeref) + intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref); + 
i915_ttm_free_cached_io_rsgt(obj); } } @@ -981,6 +990,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) struct ttm_buffer_object *bo = area->vm_private_data; struct drm_device *dev = bo->base.dev; struct drm_i915_gem_object *obj; + intel_wakeref_t wakeref = 0; vm_fault_t ret; int idx; @@ -1002,6 +1012,9 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) return VM_FAULT_SIGBUS; } + if (i915_ttm_cpu_maps_iomem(bo->resource)) + wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm); + if (!i915_ttm_resource_mappable(bo->resource)) { int err = -ENODEV; int i; @@ -1023,7 +1036,8 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) if (err) { drm_dbg(dev, "Unable to make resource CPU accessible\n"); dma_resv_unlock(bo->base.resv); - return VM_FAULT_SIGBUS; + ret = VM_FAULT_SIGBUS; + goto out_rpm; } } @@ -1034,12 +1048,30 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) } else { ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); } + if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) - return ret; + goto out_rpm; + + /* ttm_bo_vm_reserve() already has dma_resv_lock */ + if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) { + obj->userfault_count = 1; + mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock); + list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))->lmem_userfault_list); + mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock); + } + + if (wakeref && CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) + intel_wakeref_auto(&to_gt(to_i915(obj->base.dev))->userfault_wakeref, + msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); i915_ttm_adjust_lru(obj); dma_resv_unlock(bo->base.resv); + +out_rpm: + if (wakeref) + intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref); + return ret; } diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 72ce2c9f42fd..c570cf780079 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -358,7 +358,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single) static int igt_check_page_sizes(struct i915_vma *vma) { struct drm_i915_private *i915 = vma->vm->i915; - unsigned int supported = INTEL_INFO(i915)->page_sizes; + unsigned int supported = RUNTIME_INFO(i915)->page_sizes; struct drm_i915_gem_object *obj = vma->obj; int err; @@ -419,7 +419,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg) { struct i915_ppgtt *ppgtt = arg; struct drm_i915_private *i915 = ppgtt->vm.i915; - unsigned int saved_mask = INTEL_INFO(i915)->page_sizes; + unsigned int saved_mask = RUNTIME_INFO(i915)->page_sizes; struct drm_i915_gem_object *obj; struct i915_vma *vma; int i, j, single; @@ -438,7 +438,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg) combination |= page_sizes[j]; } - mkwrite_device_info(i915)->page_sizes = combination; + RUNTIME_INFO(i915)->page_sizes = combination; for (single = 0; single <= 1; ++single) { obj = fake_huge_pages_object(i915, combination, !!single); @@ -485,7 +485,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg) out_put: i915_gem_object_put(obj); out_device: - mkwrite_device_info(i915)->page_sizes = saved_mask; + RUNTIME_INFO(i915)->page_sizes = saved_mask; return err; } @@ -495,7 +495,7 @@ static int igt_mock_memory_region_huge_pages(void *arg) { const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS }; struct i915_ppgtt *ppgtt = arg; struct 
drm_i915_private *i915 = ppgtt->vm.i915; - unsigned long supported = INTEL_INFO(i915)->page_sizes; + unsigned long supported = RUNTIME_INFO(i915)->page_sizes; struct intel_memory_region *mem; struct drm_i915_gem_object *obj; struct i915_vma *vma; @@ -573,7 +573,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg) { struct i915_ppgtt *ppgtt = arg; struct drm_i915_private *i915 = ppgtt->vm.i915; - unsigned long supported = INTEL_INFO(i915)->page_sizes; + unsigned long supported = RUNTIME_INFO(i915)->page_sizes; struct drm_i915_gem_object *obj; int bit; int err; @@ -1390,7 +1390,7 @@ out_put: static int igt_ppgtt_sanity_check(void *arg) { struct drm_i915_private *i915 = arg; - unsigned int supported = INTEL_INFO(i915)->page_sizes; + unsigned int supported = RUNTIME_INFO(i915)->page_sizes; struct { igt_create_fn fn; unsigned int flags; @@ -1764,8 +1764,8 @@ int i915_gem_huge_page_mock_selftests(void) return -ENOMEM; /* Pretend to be a device which supports the 48b PPGTT */ - mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL; - mkwrite_device_info(dev_priv)->ppgtt_size = 48; + RUNTIME_INFO(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL; + RUNTIME_INFO(dev_priv)->ppgtt_size = 48; ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0); if (IS_ERR(ppgtt)) { diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c index 3cfc621ef363..9a6a6b5b722b 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c @@ -711,7 +711,7 @@ static bool bad_swizzling(struct drm_i915_private *i915) { struct i915_ggtt *ggtt = to_gt(i915)->ggtt; - if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) + if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) return true; if (has_bit17_swizzle(ggtt->bit_6_swizzle_x) || diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 55bf23dc0e54..b73c91aa5450 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -367,7 +367,7 @@ static int igt_partial_tiling(void *arg) unsigned int pitch; struct tile tile; - if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) + if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) /* * The swizzling pattern is actually unknown as it * varies based on physical address of each page. @@ -464,7 +464,7 @@ static int igt_smoke_tiling(void *arg) * Remember to look at the st_seed if we see a flip-flop in BAT! 
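For illustration, a tiny standalone C sketch of why bit-17-dependent swizzling defeats these tiling checks (a toy model only; the real hardware folds several physical address bits, not just bit 17, into bit 6):

#include <stdint.h>
#include <stdio.h>

/*
 * Toy model: on affected machines the effective location of a cacheline
 * within a tiled buffer flips bit 6 depending on a high physical address
 * bit (bit 17 being the problematic one), so the layout cannot be
 * predicted without knowing each page's physical address.
 */
static uint64_t swizzle_bit6(uint64_t phys_addr)
{
	uint64_t bit17 = (phys_addr >> 17) & 1;

	return phys_addr ^ (bit17 << 6);
}

int main(void)
{
	/* Same offset within the page, two different physical pages */
	printf("%#8llx -> %#8llx\n", 0x00040ull,
	       (unsigned long long)swizzle_bit6(0x00040));
	printf("%#8llx -> %#8llx\n", 0x20040ull,
	       (unsigned long long)swizzle_bit6(0x20040));
	return 0;
}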
*/ - if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) + if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) return 0; obj = huge_gem_object(i915, diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index 98645797962f..e49fa6fa6aee 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -165,10 +165,12 @@ static u32 preparser_disable(bool state) return MI_ARB_CHECK | 1 << 8 | state; } -u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg) +u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg) { + u32 gsi_offset = gt->uncore->gsi_offset; + *cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN; - *cs++ = i915_mmio_reg_offset(inv_reg); + *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset; *cs++ = AUX_INV; *cs++ = MI_NOOP; @@ -254,7 +256,8 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) if (!HAS_FLAT_CCS(rq->engine->i915)) { /* hsdes: 1809175790 */ - cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV); + cs = gen12_emit_aux_table_inv(rq->engine->gt, + cs, GEN12_GFX_CCS_AUX_NV); } *cs++ = preparser_disable(false); @@ -313,9 +316,11 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) if (aux_inv) { /* hsdes: 1809175790 */ if (rq->engine->class == VIDEO_DECODE_CLASS) - cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV); + cs = gen12_emit_aux_table_inv(rq->engine->gt, + cs, GEN12_VD0_AUX_NV); else - cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV); + cs = gen12_emit_aux_table_inv(rq->engine->gt, + cs, GEN12_VE0_AUX_NV); } if (mode & EMIT_INVALIDATE) diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h index 32e3d2b831bb..e4d24c811dd6 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h @@ -13,6 +13,7 @@ #include "intel_gt_regs.h" #include "intel_gpu_commands.h" +struct intel_gt; struct i915_request; int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode); @@ -45,7 +46,7 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); -u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg); +u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg); static inline u32 * __gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 17e7f20bbb48..1f7188129cd1 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -654,16 +654,83 @@ bool gen11_vdbox_has_sfc(struct intel_gt *gt, */ if ((gt->info.sfc_mask & BIT(physical_vdbox / 2)) == 0) return false; - else if (GRAPHICS_VER(i915) == 12) + else if (MEDIA_VER(i915) >= 12) return (physical_vdbox % 2 == 0) || !(BIT(physical_vdbox - 1) & vdbox_mask); - else if (GRAPHICS_VER(i915) == 11) + else if (MEDIA_VER(i915) == 11) return logical_vdbox % 2 == 0; - MISSING_CASE(GRAPHICS_VER(i915)); return false; } +static void engine_mask_apply_media_fuses(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + unsigned int logical_vdbox = 0; + unsigned int i; + u32 media_fuse, fuse1; + u16 vdbox_mask; + u16 vebox_mask; + + if (MEDIA_VER(gt->i915) < 11) + return; + + /* + * On newer platforms the fusing register is called 'enable' and has + * enable 
semantics, while on older platforms it is called 'disable' + * and bits have disable semantics. + */ + media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE); + if (MEDIA_VER_FULL(i915) < IP_VER(12, 50)) + media_fuse = ~media_fuse; + + vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; + vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> + GEN11_GT_VEBOX_DISABLE_SHIFT; + + if (MEDIA_VER_FULL(i915) >= IP_VER(12, 50)) { + fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1); + gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1); + } else { + gt->info.sfc_mask = ~0; + } + + for (i = 0; i < I915_MAX_VCS; i++) { + if (!HAS_ENGINE(gt, _VCS(i))) { + vdbox_mask &= ~BIT(i); + continue; + } + + if (!(BIT(i) & vdbox_mask)) { + gt->info.engine_mask &= ~BIT(_VCS(i)); + drm_dbg(&i915->drm, "vcs%u fused off\n", i); + continue; + } + + if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask)) + gt->info.vdbox_sfc_access |= BIT(i); + logical_vdbox++; + } + drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n", + vdbox_mask, VDBOX_MASK(gt)); + GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt)); + + for (i = 0; i < I915_MAX_VECS; i++) { + if (!HAS_ENGINE(gt, _VECS(i))) { + vebox_mask &= ~BIT(i); + continue; + } + + if (!(BIT(i) & vebox_mask)) { + gt->info.engine_mask &= ~BIT(_VECS(i)); + drm_dbg(&i915->drm, "vecs%u fused off\n", i); + } + } + drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n", + vebox_mask, VEBOX_MASK(gt)); + GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt)); +} + static void engine_mask_apply_compute_fuses(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; @@ -672,6 +739,9 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt) unsigned long ccs_mask; unsigned int i; + if (GRAPHICS_VER(i915) < 11) + return; + if (hweight32(CCS_MASK(gt)) <= 1) return; @@ -694,6 +764,10 @@ static void engine_mask_apply_copy_fuses(struct intel_gt *gt) unsigned long meml3_mask; unsigned long quad; + if (!(GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60) && + GRAPHICS_VER_FULL(i915) < IP_VER(12, 70))) + return; + meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3); meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask); @@ -727,75 +801,11 @@ static void engine_mask_apply_copy_fuses(struct intel_gt *gt) */ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt) { - struct drm_i915_private *i915 = gt->i915; struct intel_gt_info *info = &gt->info; - struct intel_uncore *uncore = gt->uncore; - unsigned int logical_vdbox = 0; - unsigned int i; - u32 media_fuse, fuse1; - u16 vdbox_mask; - u16 vebox_mask; - - info->engine_mask = INTEL_INFO(i915)->platform_engine_mask; - - if (GRAPHICS_VER(i915) < 11) - return info->engine_mask; - /* - * On newer platforms the fusing register is called 'enable' and has - * enable semantics, while on older platforms it is called 'disable' - * and bits have disable semantices.
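A minimal userspace sketch of the decode above, assuming an illustrative field layout (the real masks are GEN11_GT_VDBOX_DISABLE_MASK and friends in intel_gt_regs.h):

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout only: low 8 bits = vdbox fuses, next 4 = vebox fuses */
#define VDBOX_FUSE_MASK  0x00ffu
#define VEBOX_FUSE_MASK  0x0f00u
#define VEBOX_FUSE_SHIFT 8

static void decode_media_fuse(uint32_t fuse, int disable_semantics,
			      uint16_t *vdbox, uint16_t *vebox)
{
	/* Older parts: a set bit means "fused off", so invert first */
	if (disable_semantics)
		fuse = ~fuse;

	*vdbox = fuse & VDBOX_FUSE_MASK;
	*vebox = (fuse & VEBOX_FUSE_MASK) >> VEBOX_FUSE_SHIFT;
}

int main(void)
{
	uint16_t vd, ve;

	decode_media_fuse(0x0202, 1, &vd, &ve);	/* VCS1 and VECS1 fused off */
	printf("vdbox mask %#x, vebox mask %#x\n", vd, ve);
	return 0;
}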
- */ - media_fuse = intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE); - if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) - media_fuse = ~media_fuse; - - vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; - vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> - GEN11_GT_VEBOX_DISABLE_SHIFT; - - if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { - fuse1 = intel_uncore_read(uncore, HSW_PAVP_FUSE1); - gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1); - } else { - gt->info.sfc_mask = ~0; - } - - for (i = 0; i < I915_MAX_VCS; i++) { - if (!HAS_ENGINE(gt, _VCS(i))) { - vdbox_mask &= ~BIT(i); - continue; - } - - if (!(BIT(i) & vdbox_mask)) { - info->engine_mask &= ~BIT(_VCS(i)); - drm_dbg(&i915->drm, "vcs%u fused off\n", i); - continue; - } - - if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask)) - gt->info.vdbox_sfc_access |= BIT(i); - logical_vdbox++; - } - drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n", - vdbox_mask, VDBOX_MASK(gt)); - GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt)); - - for (i = 0; i < I915_MAX_VECS; i++) { - if (!HAS_ENGINE(gt, _VECS(i))) { - vebox_mask &= ~BIT(i); - continue; - } - - if (!(BIT(i) & vebox_mask)) { - info->engine_mask &= ~BIT(_VECS(i)); - drm_dbg(&i915->drm, "vecs%u fused off\n", i); - } - } - drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n", - vebox_mask, VEBOX_MASK(gt)); - GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt)); + GEM_BUG_ON(!info->engine_mask); + engine_mask_apply_media_fuses(gt); engine_mask_apply_compute_fuses(gt); engine_mask_apply_copy_fuses(gt); @@ -1688,9 +1698,9 @@ bool intel_engine_irq_enable(struct intel_engine_cs *engine) return false; /* Caller disables interrupts */ - spin_lock(&engine->gt->irq_lock); + spin_lock(engine->gt->irq_lock); engine->irq_enable(engine); - spin_unlock(&engine->gt->irq_lock); + spin_unlock(engine->gt->irq_lock); return true; } @@ -1701,9 +1711,9 @@ void intel_engine_irq_disable(struct intel_engine_cs *engine) return; /* Caller disables interrupts */ - spin_lock(&engine->gt->irq_lock); + spin_lock(engine->gt->irq_lock); engine->irq_disable(engine); - spin_unlock(&engine->gt->irq_lock); + spin_unlock(engine->gt->irq_lock); } void intel_engines_reset_default_submission(struct intel_gt *gt) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h index 889f0df3940b..fe1a0d5fd4b1 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h @@ -110,6 +110,7 @@ #define RING_SBBSTATE(base) _MMIO((base) + 0x118) /* hsw+ */ #define RING_SBBADDR_UDW(base) _MMIO((base) + 0x11c) /* gen8+ */ #define RING_BBADDR(base) _MMIO((base) + 0x140) +#define RING_BB_OFFSET(base) _MMIO((base) + 0x158) #define RING_BBADDR_UDW(base) _MMIO((base) + 0x168) /* gen8+ */ #define CCID(base) _MMIO((base) + 0x180) #define CCID_EN BIT(0) diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index 15a915bb4088..30cf5c3369d9 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -16,7 +16,9 @@ #include "intel_ggtt_gmch.h" #include "intel_gt.h" #include "intel_gt_regs.h" +#include "intel_pci_config.h" #include "i915_drv.h" +#include "i915_pci.h" #include "i915_scatterlist.h" #include "i915_utils.h" #include "i915_vgpu.h" @@ -869,8 +871,8 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) u32 pte_flags; int ret; - GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915)); - phys_addr = pci_resource_start(pdev, 0) + 
gen6_gttadr_offset(i915); /* * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range @@ -930,7 +932,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) u16 snb_gmch_ctl; if (!HAS_LMEM(i915)) { - ggtt->gmadr = pci_resource(pdev, 2); + if (!i915_pci_resource_valid(pdev, GTT_APERTURE_BAR)) + return -ENXIO; + + ggtt->gmadr = pci_resource(pdev, GTT_APERTURE_BAR); ggtt->mappable_end = resource_size(&ggtt->gmadr); } @@ -1084,7 +1089,10 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt) unsigned int size; u16 snb_gmch_ctl; - ggtt->gmadr = pci_resource(pdev, 2); + if (!i915_pci_resource_valid(pdev, GTT_APERTURE_BAR)) + return -ENXIO; + + ggtt->gmadr = pci_resource(pdev, GTT_APERTURE_BAR); ggtt->mappable_end = resource_size(&ggtt->gmadr); /* diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c index 6ebda3d65086..ea775e601686 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c @@ -727,7 +727,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt) * bit17 dependent, and so we need to also prevent the pages * from being moved. */ - i915->quirks |= QUIRK_PIN_SWIZZLED_PAGES; + i915->gem_quirks |= GEM_QUIRK_PIN_SWIZZLED_PAGES; swizzle_x = I915_BIT_6_SWIZZLE_NONE; swizzle_y = I915_BIT_6_SWIZZLE_NONE; } @@ -842,7 +842,6 @@ void intel_ggtt_init_fences(struct i915_ggtt *ggtt) INIT_LIST_HEAD(&ggtt->fence_list); INIT_LIST_HEAD(&ggtt->userfault_list); - intel_wakeref_auto_init(&ggtt->userfault_wakeref, uncore->rpm); detect_bit_6_swizzle(ggtt); diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c index 0e494028b81d..7af6db3194dd 100644 --- a/drivers/gpu/drm/i915/gt/intel_gsc.c +++ b/drivers/gpu/drm/i915/gt/intel_gsc.c @@ -7,6 +7,7 @@ #include <linux/mei_aux.h> #include "i915_drv.h" #include "i915_reg.h" +#include "gem/i915_gem_region.h" #include "gt/intel_gsc.h" #include "gt/intel_gt.h" @@ -36,10 +37,56 @@ static int gsc_irq_init(int irq) return irq_set_chip_data(irq, NULL); } +static int +gsc_ext_om_alloc(struct intel_gsc *gsc, struct intel_gsc_intf *intf, size_t size) +{ + struct intel_gt *gt = gsc_to_gt(gsc); + struct drm_i915_gem_object *obj; + int err; + + obj = i915_gem_object_create_lmem(gt->i915, size, + I915_BO_ALLOC_CONTIGUOUS | + I915_BO_ALLOC_CPU_CLEAR); + if (IS_ERR(obj)) { + drm_err(&gt->i915->drm, "Failed to allocate gsc memory\n"); + return PTR_ERR(obj); + } + + err = i915_gem_object_pin_pages_unlocked(obj); + if (err) { + drm_err(&gt->i915->drm, "Failed to pin pages for gsc memory\n"); + goto out_put; + } + + intf->gem_obj = obj; + + return 0; + +out_put: + i915_gem_object_put(obj); + return err; +} + +static void gsc_ext_om_destroy(struct intel_gsc_intf *intf) +{ + struct drm_i915_gem_object *obj = fetch_and_zero(&intf->gem_obj); + + if (!obj) + return; + + if (i915_gem_object_has_pinned_pages(obj)) + i915_gem_object_unpin_pages(obj); + + i915_gem_object_put(obj); +} + struct gsc_def { const char *name; unsigned long bar; size_t bar_size; + bool use_polling; + bool slow_firmware; + size_t lmem_size; }; /* gsc resources and definitions (HECI1 and HECI2) */ @@ -54,11 +101,25 @@ static const struct gsc_def gsc_def_dg1[] = { } }; +static const struct gsc_def gsc_def_xehpsdv[] = { + { + /* HECI1 not enabled on the device.
*/ }, { .name = "mei-gscfi", .bar = DG1_GSC_HECI2_BASE, .bar_size = GSC_BAR_LENGTH, + .use_polling = true, + .slow_firmware = true, + } +}; + static const struct gsc_def gsc_def_dg2[] = { { .name = "mei-gsc", .bar = DG2_GSC_HECI1_BASE, .bar_size = GSC_BAR_LENGTH, + .lmem_size = SZ_4M, }, { .name = "mei-gscfi", @@ -75,26 +136,32 @@ static void gsc_release_dev(struct device *dev) kfree(adev); } -static void gsc_destroy_one(struct intel_gsc_intf *intf) +static void gsc_destroy_one(struct drm_i915_private *i915, + struct intel_gsc *gsc, unsigned int intf_id) { + struct intel_gsc_intf *intf = &gsc->intf[intf_id]; + if (intf->adev) { auxiliary_device_delete(&intf->adev->aux_dev); auxiliary_device_uninit(&intf->adev->aux_dev); intf->adev = NULL; } + if (intf->irq >= 0) irq_free_desc(intf->irq); intf->irq = -1; + + gsc_ext_om_destroy(intf); } -static void gsc_init_one(struct drm_i915_private *i915, - struct intel_gsc_intf *intf, +static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc, unsigned int intf_id) { struct pci_dev *pdev = to_pci_dev(i915->drm.dev); struct mei_aux_device *adev; struct auxiliary_device *aux_dev; const struct gsc_def *def; + struct intel_gsc_intf *intf = &gsc->intf[intf_id]; int ret; intf->irq = -1; @@ -105,6 +172,8 @@ static void gsc_init_one(struct drm_i915_private *i915, if (IS_DG1(i915)) { def = &gsc_def_dg1[intf_id]; + } else if (IS_XEHPSDV(i915)) { + def = &gsc_def_xehpsdv[intf_id]; } else if (IS_DG2(i915)) { def = &gsc_def_dg2[intf_id]; } else { @@ -117,10 +186,14 @@ static void gsc_init_one(struct drm_i915_private *i915, return; } + /* skip irq initialization */ + if (def->use_polling) + goto add_device; + intf->irq = irq_alloc_desc(0); if (intf->irq < 0) { drm_err(&i915->drm, "gsc irq error %d\n", intf->irq); - return; + goto fail; } ret = gsc_irq_init(intf->irq); @@ -129,16 +202,31 @@ static void gsc_init_one(struct drm_i915_private *i915, goto fail; } +add_device: adev = kzalloc(sizeof(*adev), GFP_KERNEL); if (!adev) goto fail; + if (def->lmem_size) { + drm_dbg(&i915->drm, "setting up GSC lmem\n"); + + if (gsc_ext_om_alloc(gsc, intf, def->lmem_size)) { + drm_err(&i915->drm, "setting up gsc extended operational memory failed\n"); + kfree(adev); + goto fail; + } + + adev->ext_op_mem.start = i915_gem_object_get_dma_address(intf->gem_obj, 0); + adev->ext_op_mem.end = adev->ext_op_mem.start + def->lmem_size; + } + adev->irq = intf->irq; adev->bar.parent = &pdev->resource[0]; adev->bar.start = def->bar + pdev->resource[0].start; adev->bar.end = adev->bar.start + def->bar_size - 1; adev->bar.flags = IORESOURCE_MEM; adev->bar.desc = IORES_DESC_NONE; + adev->slow_firmware = def->slow_firmware; aux_dev = &adev->aux_dev; aux_dev->name = def->name; @@ -165,7 +253,7 @@ static void gsc_init_one(struct drm_i915_private *i915, return; fail: - gsc_destroy_one(i915, gsc, intf->id); + gsc_destroy_one(i915, gsc, intf->id); } static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id) @@ -182,10 +270,8 @@ static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id) return; } - if (gt->gsc.intf[intf_id].irq < 0) { - drm_err_ratelimited(&gt->i915->drm, "GSC irq: irq not set"); + if (gt->gsc.intf[intf_id].irq < 0) return; - } ret = generic_handle_irq(gt->gsc.intf[intf_id].irq); if (ret) @@ -208,7 +294,7 @@ void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915) return; for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++) - gsc_init_one(i915, &gsc->intf[i], i); + gsc_init_one(i915, gsc, i); } void intel_gsc_fini(struct intel_gsc *gsc) @@
-220,5 +306,5 @@ void intel_gsc_fini(struct intel_gsc *gsc) return; for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++) - gsc_destroy_one(&gsc->intf[i]); + gsc_destroy_one(gt->i915, gsc, i); } diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.h b/drivers/gpu/drm/i915/gt/intel_gsc.h index 68582f912b21..fcac1775e9c3 100644 --- a/drivers/gpu/drm/i915/gt/intel_gsc.h +++ b/drivers/gpu/drm/i915/gt/intel_gsc.h @@ -20,11 +20,14 @@ struct mei_aux_device; /** * struct intel_gsc - graphics security controller + * + * @gem_obj: scratch memory for GSC operations + * @intf : gsc interface */ struct intel_gsc { struct intel_gsc_intf { struct mei_aux_device *adev; + struct drm_i915_gem_object *gem_obj; int irq; unsigned int id; } intf[INTEL_GSC_NUM_INTERFACES]; diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index f435e06125aa..d0b03a928b9a 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -26,18 +26,22 @@ #include "intel_gt_requests.h" #include "intel_migrate.h" #include "intel_mocs.h" +#include "intel_pci_config.h" #include "intel_pm.h" #include "intel_rc6.h" #include "intel_renderstate.h" #include "intel_rps.h" +#include "intel_sa_media.h" #include "intel_gt_sysfs.h" #include "intel_uncore.h" #include "shmem_utils.h" -static void __intel_gt_init_early(struct intel_gt *gt) +void intel_gt_common_init_early(struct intel_gt *gt) { - spin_lock_init(&gt->irq_lock); + spin_lock_init(gt->irq_lock); + INIT_LIST_HEAD(&gt->lmem_userfault_list); + mutex_init(&gt->lmem_userfault_lock); INIT_LIST_HEAD(&gt->closed_vma); spin_lock_init(&gt->closed_lock); @@ -57,14 +61,19 @@ static void __intel_gt_init_early(struct intel_gt *gt) } /* Preliminary initialization of Tile 0 */ -void intel_root_gt_init_early(struct drm_i915_private *i915) +int intel_root_gt_init_early(struct drm_i915_private *i915) { struct intel_gt *gt = to_gt(i915); gt->i915 = i915; gt->uncore = &i915->uncore; + gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL); + if (!gt->irq_lock) + return -ENOMEM; - __intel_gt_init_early(gt); + intel_gt_common_init_early(gt); + + return 0; } static int intel_gt_probe_lmem(struct intel_gt *gt) @@ -780,26 +789,25 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr) int ret; if (!gt_is_root(gt)) { - struct intel_uncore_mmio_debug *mmio_debug; struct intel_uncore *uncore; + spinlock_t *irq_lock; - uncore = kzalloc(sizeof(*uncore), GFP_KERNEL); + uncore = drmm_kzalloc(&gt->i915->drm, sizeof(*uncore), GFP_KERNEL); if (!uncore) return -ENOMEM; - mmio_debug = kzalloc(sizeof(*mmio_debug), GFP_KERNEL); - if (!mmio_debug) { - kfree(uncore); + irq_lock = drmm_kzalloc(&gt->i915->drm, sizeof(*irq_lock), GFP_KERNEL); + if (!irq_lock) return -ENOMEM; - } gt->uncore = uncore; - gt->uncore->debug = mmio_debug; + gt->irq_lock = irq_lock; - __intel_gt_init_early(gt); + intel_gt_common_init_early(gt); } intel_uncore_init_early(gt->uncore, gt); + intel_wakeref_auto_init(&gt->userfault_wakeref, gt->uncore->rpm); ret = intel_uncore_setup_mmio(gt->uncore, phys_addr); if (ret) @@ -810,27 +818,17 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr) return 0; } -static void -intel_gt_tile_cleanup(struct intel_gt *gt) -{ - intel_uncore_cleanup_mmio(gt->uncore); - - if (!gt_is_root(gt)) { - kfree(gt->uncore->debug); - kfree(gt->uncore); - kfree(gt); - } -} - int intel_gt_probe_all(struct drm_i915_private *i915) { struct pci_dev *pdev = to_pci_dev(i915->drm.dev); struct intel_gt *gt = &i915->gt0; + const struct
intel_gt_definition *gtdef; phys_addr_t phys_addr; unsigned int mmio_bar; + unsigned int i; int ret; - mmio_bar = GRAPHICS_VER(i915) == 2 ? 1 : 0; + mmio_bar = GRAPHICS_VER(i915) == 2 ? GEN2_GTTMMADR_BAR : GTTMMADR_BAR; phys_addr = pci_resource_start(pdev, mmio_bar); /* @@ -838,14 +836,74 @@ int intel_gt_probe_all(struct drm_i915_private *i915) * and it has been already initialized early during probe * in i915_driver_probe() */ + gt->i915 = i915; + gt->name = "Primary GT"; + gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask; + + drm_dbg(&i915->drm, "Setting up %s\n", gt->name); ret = intel_gt_tile_setup(gt, phys_addr); if (ret) return ret; i915->gt[0] = gt; - /* TODO: add more tiles */ + if (!HAS_EXTRA_GT_LIST(i915)) + return 0; + + for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]; + gtdef->name != NULL; + i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) { + gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL); + if (!gt) { + ret = -ENOMEM; + goto err; + } + + gt->i915 = i915; + gt->name = gtdef->name; + gt->type = gtdef->type; + gt->info.engine_mask = gtdef->engine_mask; + gt->info.id = i; + + drm_dbg(&i915->drm, "Setting up %s\n", gt->name); + if (GEM_WARN_ON(range_overflows_t(resource_size_t, + gtdef->mapping_base, + SZ_16M, + pci_resource_len(pdev, mmio_bar)))) { + ret = -ENODEV; + goto err; + } + + switch (gtdef->type) { + case GT_TILE: + ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base); + break; + + case GT_MEDIA: + ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base, + gtdef->gsi_offset); + break; + + case GT_PRIMARY: + /* Primary GT should not appear in extra GT list */ + default: + MISSING_CASE(gtdef->type); + ret = -ENODEV; + } + + if (ret) + goto err; + + i915->gt[i] = gt; + } + return 0; + +err: + i915_probe_error(i915, "Failed to initialize %s! 
(%d)\n", gtdef->name, ret); + intel_gt_release_all(i915); + + return ret; } int intel_gt_tiles_init(struct drm_i915_private *i915) @@ -868,10 +926,8 @@ void intel_gt_release_all(struct drm_i915_private *i915) struct intel_gt *gt; unsigned int id; - for_each_gt(gt, i915, id) { - intel_gt_tile_cleanup(gt); + for_each_gt(gt, i915, id) i915->gt[id] = NULL; - } } void intel_gt_info_print(const struct intel_gt_info *info, diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 40b06adf509a..2ee582e287c8 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -44,7 +44,8 @@ static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc) return container_of(gsc, struct intel_gt, gsc); } -void intel_root_gt_init_early(struct drm_i915_private *i915); +void intel_gt_common_init_early(struct intel_gt *gt); +int intel_root_gt_init_early(struct drm_i915_private *i915); int intel_gt_assign_ggtt(struct intel_gt *gt); int intel_gt_init_mmio(struct intel_gt *gt); int __must_check intel_gt_init_hw(struct intel_gt *gt); @@ -54,7 +55,6 @@ void intel_gt_driver_register(struct intel_gt *gt); void intel_gt_driver_unregister(struct intel_gt *gt); void intel_gt_driver_remove(struct intel_gt *gt); void intel_gt_driver_release(struct intel_gt *gt); - void intel_gt_driver_late_release_all(struct drm_i915_private *i915); int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c index d5d1b04dbcad..3f656d3dba9a 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c @@ -26,26 +26,6 @@ static u32 read_reference_ts_freq(struct intel_uncore *uncore) return base_freq + frac_freq; } -static u32 gen9_get_crystal_clock_freq(struct intel_uncore *uncore, - u32 rpm_config_reg) -{ - u32 f19_2_mhz = 19200000; - u32 f24_mhz = 24000000; - u32 crystal_clock = - (rpm_config_reg & GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >> - GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT; - - switch (crystal_clock) { - case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ: - return f19_2_mhz; - case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ: - return f24_mhz; - default: - MISSING_CASE(crystal_clock); - return 0; - } -} - static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore, u32 rpm_config_reg) { @@ -72,98 +52,106 @@ static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore, } } -static u32 read_clock_frequency(struct intel_uncore *uncore) +static u32 gen11_read_clock_frequency(struct intel_uncore *uncore) { - u32 f12_5_mhz = 12500000; - u32 f19_2_mhz = 19200000; - u32 f24_mhz = 24000000; + u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE); + u32 freq = 0; + + /* + * Note that on gen11+, the clock frequency may be reconfigured. + * We do not, and we assume nobody else does. + * + * First figure out the reference frequency. There are 2 ways + * we can compute the frequency, either through the + * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE + * tells us which one we should use. + */ + if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) { + freq = read_reference_ts_freq(uncore); + } else { + u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0); + + freq = gen11_get_crystal_clock_freq(uncore, c0); - if (GRAPHICS_VER(uncore->i915) <= 4) { - /* - * PRMs say: - * - * "The value in this register increments once every 16 - * hclks." 
(through the “Clocking Configuration” - * (“CLKCFG”) MCHBAR register) - */ - return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16; - } else if (GRAPHICS_VER(uncore->i915) <= 8) { /* - * PRMs say: - * - * "The PCU TSC counts 10ns increments; this timestamp - * reflects bits 38:3 of the TSC (i.e. 80ns granularity, - * rolling over every 1.5 hours). + * Now figure out how the command stream's timestamp + * register increments from this frequency (it might + * increment only every few clock cycle). */ - return f12_5_mhz; - } else if (GRAPHICS_VER(uncore->i915) <= 9) { - u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE); - u32 freq = 0; - - if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) { - freq = read_reference_ts_freq(uncore); - } else { - freq = IS_GEN9_LP(uncore->i915) ? f19_2_mhz : f24_mhz; - - /* - * Now figure out how the command stream's timestamp - * register increments from this frequency (it might - * increment only every few clock cycle). - */ - freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >> - CTC_SHIFT_PARAMETER_SHIFT); - } - - return freq; - } else if (GRAPHICS_VER(uncore->i915) <= 12) { - u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE); - u32 freq = 0; + freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >> + GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT); + } + + return freq; +} + +static u32 gen9_read_clock_frequency(struct intel_uncore *uncore) +{ + u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE); + u32 freq = 0; + + if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) { + freq = read_reference_ts_freq(uncore); + } else { + freq = IS_GEN9_LP(uncore->i915) ? 19200000 : 24000000; /* - * First figure out the reference frequency. There are 2 ways - * we can compute the frequency, either through the - * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE - * tells us which one we should use. + * Now figure out how the command stream's timestamp + * register increments from this frequency (it might + * increment only every few clock cycle). */ - if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) { - freq = read_reference_ts_freq(uncore); - } else { - u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0); - - if (GRAPHICS_VER(uncore->i915) >= 11) - freq = gen11_get_crystal_clock_freq(uncore, c0); - else - freq = gen9_get_crystal_clock_freq(uncore, c0); - - /* - * Now figure out how the command stream's timestamp - * register increments from this frequency (it might - * increment only every few clock cycle). - */ - freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >> - GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT); - } - - return freq; + freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >> + CTC_SHIFT_PARAMETER_SHIFT); } - MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n"); - return 0; + return freq; } -void intel_gt_init_clock_frequency(struct intel_gt *gt) +static u32 gen5_read_clock_frequency(struct intel_uncore *uncore) { /* - * Note that on gen11+, the clock frequency may be reconfigured. - * We do not, and we assume nobody else does. + * PRMs say: + * + * "The PCU TSC counts 10ns increments; this timestamp + * reflects bits 38:3 of the TSC (i.e. 80ns granularity, + * rolling over every 1.5 hours). + */ + return 12500000; +} + +static u32 gen2_read_clock_frequency(struct intel_uncore *uncore) +{ + /* + * PRMs say: + * + * "The value in this register increments once every 16 + * hclks." 
(through the “Clocking Configuration” + * (“CLKCFG”) MCHBAR register) */ + return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16; +} + +static u32 read_clock_frequency(struct intel_uncore *uncore) +{ + if (GRAPHICS_VER(uncore->i915) >= 11) + return gen11_read_clock_frequency(uncore); + else if (GRAPHICS_VER(uncore->i915) >= 9) + return gen9_read_clock_frequency(uncore); + else if (GRAPHICS_VER(uncore->i915) >= 5) + return gen5_read_clock_frequency(uncore); + else + return gen2_read_clock_frequency(uncore); +} + +void intel_gt_init_clock_frequency(struct intel_gt *gt) +{ gt->clock_frequency = read_clock_frequency(gt->uncore); - if (gt->clock_frequency) - gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1); /* Icelake appears to use another fixed frequency for CTX_TIMESTAMP */ if (GRAPHICS_VER(gt->i915) == 11) gt->clock_period_ns = NSEC_PER_SEC / 13750000; + else if (gt->clock_frequency) + gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1); GT_TRACE(gt, "Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n", diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c index 3a72d4fd0214..f26882fdc24c 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -29,7 +29,7 @@ gen11_gt_engine_identity(struct intel_gt *gt, u32 timeout_ts; u32 ident; - lockdep_assert_held(&gt->irq_lock); + lockdep_assert_held(gt->irq_lock); raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit)); @@ -59,11 +59,17 @@ static void gen11_other_irq_handler(struct intel_gt *gt, const u8 instance, const u16 iir) { + struct intel_gt *media_gt = gt->i915->media_gt; + if (instance == OTHER_GUC_INSTANCE) return guc_irq_handler(&gt->uc.guc, iir); + if (instance == OTHER_MEDIA_GUC_INSTANCE && media_gt) + return guc_irq_handler(&media_gt->uc.guc, iir); if (instance == OTHER_GTPM_INSTANCE) return gen11_rps_irq_handler(&gt->rps, iir); + if (instance == OTHER_MEDIA_GTPM_INSTANCE && media_gt) + return gen11_rps_irq_handler(&media_gt->rps, iir); if (instance == OTHER_KCR_INSTANCE) return intel_pxp_irq_handler(&gt->pxp, iir); @@ -81,6 +87,18 @@ gen11_engine_irq_handler(struct intel_gt *gt, const u8 class, { struct intel_engine_cs *engine; + /* + * Platforms with standalone media have their media engines in another + * GT.
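The forwarding this comment describes can be modelled in a few lines of standalone C (toy types, not the driver's):

#include <stdio.h>

/* Toy model: a primary GT owning the interrupt registers forwards
 * video-class interrupts to the standalone media GT when present. */
enum toy_class { TOY_RENDER, TOY_VIDEO_DECODE, TOY_VIDEO_ENHANCE };

struct toy_gt {
	const char *name;
	struct toy_gt *media_gt;	/* NULL if media is not standalone */
};

static void toy_engine_irq(struct toy_gt *gt, enum toy_class class)
{
	if ((class == TOY_VIDEO_DECODE || class == TOY_VIDEO_ENHANCE) &&
	    gt->media_gt)
		gt = gt->media_gt;	/* the engine lives in the media GT */

	printf("class %d handled by %s\n", class, gt->name);
}

int main(void)
{
	struct toy_gt media = { .name = "media GT", .media_gt = NULL };
	struct toy_gt primary = { .name = "primary GT", .media_gt = &media };

	toy_engine_irq(&primary, TOY_RENDER);
	toy_engine_irq(&primary, TOY_VIDEO_DECODE);
	return 0;
}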
+ */ + if (MEDIA_VER(gt->i915) >= 13 && + (class == VIDEO_DECODE_CLASS || class == VIDEO_ENHANCEMENT_CLASS)) { + if (!gt->i915->media_gt) + goto err; + + gt = gt->i915->media_gt; + } + if (instance <= MAX_ENGINE_INSTANCE) engine = gt->engine_class[class][instance]; else @@ -89,6 +107,7 @@ gen11_engine_irq_handler(struct intel_gt *gt, const u8 class, if (likely(engine)) return intel_engine_cs_irq(engine, iir); +err: WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n", class, instance); } @@ -120,7 +139,7 @@ gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank) unsigned long intr_dw; unsigned int bit; - lockdep_assert_held(&gt->irq_lock); + lockdep_assert_held(gt->irq_lock); intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); @@ -138,14 +157,14 @@ void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl) { unsigned int bank; - spin_lock(&gt->irq_lock); + spin_lock(gt->irq_lock); for (bank = 0; bank < 2; bank++) { if (master_ctl & GEN11_GT_DW_IRQ(bank)) gen11_gt_bank_handler(gt, bank); } - spin_unlock(&gt->irq_lock); + spin_unlock(gt->irq_lock); } bool gen11_gt_reset_one_iir(struct intel_gt *gt, @@ -154,7 +173,7 @@ bool gen11_gt_reset_one_iir(struct intel_gt *gt, void __iomem * const regs = gt->uncore->regs; u32 dw; - lockdep_assert_held(&gt->irq_lock); + lockdep_assert_held(gt->irq_lock); dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); if (dw & BIT(bit)) { @@ -310,9 +329,9 @@ static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir) if (!HAS_L3_DPF(gt->i915)) return; - spin_lock(&gt->irq_lock); + spin_lock(gt->irq_lock); gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915)); - spin_unlock(&gt->irq_lock); + spin_unlock(gt->irq_lock); if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) gt->i915->l3_parity.which_slice |= 1 << 1; @@ -434,7 +453,7 @@ static void gen5_gt_update_irq(struct intel_gt *gt, u32 interrupt_mask, u32 enabled_irq_mask) { - lockdep_assert_held(&gt->irq_lock); + lockdep_assert_held(gt->irq_lock); GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c index 40bdd4cb629f..108b9e76c32e 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c @@ -504,8 +504,8 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p) drm_puts(p, "no P-state info available\n"); } - drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->cdclk.hw.cdclk); - drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->max_cdclk_freq); + drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk); + drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq); drm_printf(p, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq); intel_runtime_pm_put(uncore->rpm, wakeref); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c index 11060f5a4c89..52f2a28b2058 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c @@ -37,7 +37,7 @@ static void gen6_gt_pm_update_irq(struct intel_gt *gt, WARN_ON(enabled_irq_mask & ~interrupt_mask); - lockdep_assert_held(&gt->irq_lock); + lockdep_assert_held(gt->irq_lock); new_val = gt->pm_imr; new_val &= ~interrupt_mask; @@ -64,7 +64,7 @@ void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask) struct intel_uncore *uncore = gt->uncore; i915_reg_t reg = GRAPHICS_VER(gt->i915) >= 8 ?
GEN8_GT_IIR(2) : GEN6_PMIIR; - lockdep_assert_held(&gt->irq_lock); + lockdep_assert_held(gt->irq_lock); intel_uncore_write(uncore, reg, reset_mask); intel_uncore_write(uncore, reg, reset_mask); @@ -92,7 +92,7 @@ static void write_pm_ier(struct intel_gt *gt) void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask) { - lockdep_assert_held(&gt->irq_lock); + lockdep_assert_held(gt->irq_lock); gt->pm_ier |= enable_mask; write_pm_ier(gt); @@ -101,7 +101,7 @@ void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask) void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask) { - lockdep_assert_held(&gt->irq_lock); + lockdep_assert_held(gt->irq_lock); gt->pm_ier &= ~disable_mask; gen6_gt_pm_mask_irq(gt, disable_mask); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h index d414785003cc..2275ee47da95 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h @@ -1554,6 +1554,8 @@ #define OTHER_GTPM_INSTANCE 1 #define OTHER_KCR_INSTANCE 4 #define OTHER_GSC_INSTANCE 6 +#define OTHER_MEDIA_GUC_INSTANCE 16 +#define OTHER_MEDIA_GTPM_INSTANCE 17 #define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4)) @@ -1578,4 +1580,12 @@ #define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000) +/* + * Standalone Media's non-engine GT registers are located at their regular GT + * offsets plus 0x380000. This extra offset is stored inside the intel_uncore + * structure so that the existing code can be used for both GTs without + * modification. + */ +#define MTL_MEDIA_GSI_BASE 0x380000 + #endif /* __INTEL_GT_REGS__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index 4d56f7d5a3be..f19c2de77ff6 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -81,8 +81,17 @@ struct gt_defaults { u32 max_freq; }; +enum intel_gt_type { + GT_PRIMARY, + GT_TILE, + GT_MEDIA, +}; + struct intel_gt { struct drm_i915_private *i915; + const char *name; + enum intel_gt_type type; + struct intel_uncore *uncore; struct i915_ggtt *ggtt; @@ -132,6 +141,20 @@ struct intel_gt { struct intel_wakeref wakeref; atomic_t user_wakeref; + /** + * Protects access to the lmem userfault list. + * Outside of the runtime suspend path, access to @lmem_userfault_list + * always requires first grabbing the runtime pm wakeref, to ensure we + * can't race against runtime suspend. + * Once we have that we also need to grab @lmem_userfault_lock, + * at which point we have exclusive access. + * The runtime suspend path is special since it doesn't really hold any locks, + * but instead has exclusive access by virtue of all other accesses requiring + * holding the runtime pm wakeref.
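A runnable toy model of the documented ordering, using pthread and integer stand-ins for the struct mutex and the runtime pm wakeref (the driver actually holds the wakeref for the lifetime of the mapping; it is dropped immediately here only to keep the sketch short):

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for the real driver objects; only the lock order matters. */
struct toy_obj { struct toy_obj *next; };

static pthread_mutex_t lmem_userfault_lock = PTHREAD_MUTEX_INITIALIZER;
static struct toy_obj *lmem_userfault_list;

static int toy_rpm_get(void) { return 1; }	/* pretend wakeref cookie */
static void toy_rpm_put(int wakeref) { (void)wakeref; }

static void toy_track_userfault(struct toy_obj *obj)
{
	/* 1) Pin runtime pm so suspend cannot run concurrently... */
	int wakeref = toy_rpm_get();

	/* 2) ...then take the list lock for exclusive list access. */
	pthread_mutex_lock(&lmem_userfault_lock);
	obj->next = lmem_userfault_list;
	lmem_userfault_list = obj;
	pthread_mutex_unlock(&lmem_userfault_lock);

	toy_rpm_put(wakeref);
}

int main(void)
{
	struct toy_obj obj = { NULL };

	toy_track_userfault(&obj);
	printf("list head %p\n", (void *)lmem_userfault_list);
	return 0;
}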
+ */ + struct mutex lmem_userfault_lock; + struct list_head lmem_userfault_list; + struct list_head closed_vma; spinlock_t closed_lock; /* guards the list of closed_vma */ @@ -147,6 +170,9 @@ struct intel_gt { */ intel_wakeref_t awake; + /* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */ + struct intel_wakeref_auto userfault_wakeref; + u32 clock_frequency; u32 clock_period_ns; @@ -154,7 +180,7 @@ struct intel_gt { struct intel_rc6 rc6; struct intel_rps rps; - spinlock_t irq_lock; + spinlock_t *irq_lock; u32 gt_imr; u32 pm_ier; u32 pm_imr; @@ -262,6 +288,14 @@ struct intel_gt { struct kobject *sysfs_defaults; }; +struct intel_gt_definition { + enum intel_gt_type type; + char *name; + u32 mapping_base; + u32 gsi_offset; + intel_engine_mask_t engine_mask; +}; + enum intel_gt_scratch_field { /* 8 bytes */ INTEL_GT_SCRATCH_FIELD_DEFAULT = 0, diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index e639434e97fd..c0ca53cba9f0 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -386,9 +386,6 @@ struct i915_ggtt { */ struct list_head userfault_list; - /* Manual runtime pm autosuspend delay for user GGTT mmaps */ - struct intel_wakeref_auto userfault_wakeref; - struct mutex error_mutex; struct drm_mm_node error_capture; struct drm_mm_node uc_fw; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 070cec4ff8a4..3955292483a6 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -662,6 +662,21 @@ static int lrc_ring_mi_mode(const struct intel_engine_cs *engine) return -1; } +static int lrc_ring_bb_offset(const struct intel_engine_cs *engine) +{ + if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) + return 0x80; + else if (GRAPHICS_VER(engine->i915) >= 12) + return 0x70; + else if (GRAPHICS_VER(engine->i915) >= 9) + return 0x64; + else if (GRAPHICS_VER(engine->i915) >= 8 && + engine->class == RENDER_CLASS) + return 0xc4; + else + return -1; +} + static int lrc_ring_gpr0(const struct intel_engine_cs *engine) { if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) @@ -768,6 +783,7 @@ static void init_common_regs(u32 * const regs, bool inhibit) { u32 ctl; + int loc; ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH); ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); @@ -779,6 +795,10 @@ static void init_common_regs(u32 * const regs, regs[CTX_CONTEXT_CONTROL] = ctl; regs[CTX_TIMESTAMP] = ce->stats.runtime.last; + + loc = lrc_ring_bb_offset(engine); + if (loc != -1) + regs[loc + 1] = 0; } static void init_wa_bb_regs(u32 * const regs, @@ -1278,7 +1298,8 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs) /* hsdes: 1809175790 */ if (!HAS_FLAT_CCS(ce->engine->i915)) - cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV); + cs = gen12_emit_aux_table_inv(ce->engine->gt, + cs, GEN12_GFX_CCS_AUX_NV); /* Wa_16014892111 */ if (IS_DG2(ce->engine->i915)) @@ -1304,9 +1325,11 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs) /* hsdes: 1809175790 */ if (!HAS_FLAT_CCS(ce->engine->i915)) { if (ce->engine->class == VIDEO_DECODE_CLASS) - cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV); + cs = gen12_emit_aux_table_inv(ce->engine->gt, + cs, GEN12_VD0_AUX_NV); else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS) - cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV); + cs = gen12_emit_aux_table_inv(ce->engine->gt, + cs, GEN12_VE0_AUX_NV); } return cs; diff --git 
a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c index 6ee8d1127016..7ecfa672f738 100644 --- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c @@ -312,7 +312,7 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt, ppgtt->vm.gt = gt; ppgtt->vm.i915 = i915; ppgtt->vm.dma = i915->drm.dev; - ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size); + ppgtt->vm.total = BIT_ULL(RUNTIME_INFO(i915)->ppgtt_size); ppgtt->vm.lmem_pt_obj_flags = lmem_pt_obj_flags; dma_resv_init(&ppgtt->vm._resv); diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c index aa6aed837194..f3ad93db0b21 100644 --- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c +++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c @@ -4,8 +4,10 @@ */ #include "i915_drv.h" +#include "i915_pci.h" #include "i915_reg.h" #include "intel_memory_region.h" +#include "intel_pci_config.h" #include "intel_region_lmem.h" #include "intel_region_ttm.h" #include "gem/i915_gem_lmem.h" @@ -45,7 +47,6 @@ _resize_bar(struct drm_i915_private *i915, int resno, resource_size_t size) drm_info(&i915->drm, "BAR%d resized to %dM\n", resno, 1 << bar_size); } -#define LMEM_BAR_NUM 2 static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size) { struct pci_dev *pdev = to_pci_dev(i915->drm.dev); @@ -56,15 +57,14 @@ static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t u32 pci_cmd; int i; - current_size = roundup_pow_of_two(pci_resource_len(pdev, LMEM_BAR_NUM)); + current_size = roundup_pow_of_two(pci_resource_len(pdev, GEN12_LMEM_BAR)); if (i915->params.lmem_bar_size) { u32 bar_sizes; rebar_size = i915->params.lmem_bar_size * (resource_size_t)SZ_1M; - bar_sizes = pci_rebar_get_possible_sizes(pdev, - LMEM_BAR_NUM); + bar_sizes = pci_rebar_get_possible_sizes(pdev, GEN12_LMEM_BAR); if (rebar_size == current_size) return; @@ -107,7 +107,7 @@ static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd & ~PCI_COMMAND_MEMORY); - _resize_bar(i915, LMEM_BAR_NUM, rebar_size); + _resize_bar(i915, GEN12_LMEM_BAR, rebar_size); pci_assign_unassigned_bus_resources(pdev->bus); pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd); @@ -202,6 +202,9 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt) if (!IS_DGFX(i915)) return ERR_PTR(-ENODEV); + if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR)) + return ERR_PTR(-ENXIO); + if (HAS_FLAT_CCS(i915)) { resource_size_t lmem_range; u64 tile_stolen, flat_ccs_base; @@ -236,8 +239,8 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt) mul_u32_u32(i915->params.lmem_size, SZ_1M)); } - io_start = pci_resource_start(pdev, 2); - io_size = min(pci_resource_len(pdev, 2), lmem_size); + io_start = pci_resource_start(pdev, GEN12_LMEM_BAR); + io_size = min(pci_resource_len(pdev, GEN12_LMEM_BAR), lmem_size); if (!io_size) return ERR_PTR(-EIO); diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 6fadde4ee7bf..6b86250c31ab 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -194,9 +194,9 @@ static void rps_enable_interrupts(struct intel_rps *rps) rps_reset_ei(rps); - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); gen6_gt_pm_enable_irq(gt, rps->pm_events); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); intel_uncore_write(gt->uncore, GEN6_PMINTRMSK, rps_pm_mask(rps,
rps->last_freq)); @@ -217,14 +217,14 @@ static void rps_reset_interrupts(struct intel_rps *rps) { struct intel_gt *gt = rps_to_gt(rps); - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); if (GRAPHICS_VER(gt->i915) >= 11) gen11_rps_reset_interrupts(rps); else gen6_rps_reset_interrupts(rps); rps->pm_iir = 0; - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); } static void rps_disable_interrupts(struct intel_rps *rps) @@ -234,9 +234,9 @@ static void rps_disable_interrupts(struct intel_rps *rps) intel_uncore_write(gt->uncore, GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u)); - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); intel_synchronize_irq(gt->i915); @@ -1797,10 +1797,10 @@ static void rps_work(struct work_struct *work) int new_freq, adj, min, max; u32 pm_iir = 0; - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events; client_boost = atomic_read(&rps->num_waiters); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); /* Make sure we didn't queue anything we're not going to process. */ if (!pm_iir && !client_boost) @@ -1873,9 +1873,9 @@ static void rps_work(struct work_struct *work) mutex_unlock(&rps->lock); out: - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); gen6_gt_pm_unmask_irq(gt, rps->pm_events); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); } void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) @@ -1883,7 +1883,7 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) struct intel_gt *gt = rps_to_gt(rps); const u32 events = rps->pm_events & pm_iir; - lockdep_assert_held(&gt->irq_lock); + lockdep_assert_held(gt->irq_lock); if (unlikely(!events)) return; @@ -1903,7 +1903,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) events = pm_iir & rps->pm_events; if (events) { - spin_lock(&gt->irq_lock); + spin_lock(gt->irq_lock); GT_TRACE(gt, "irq events:%x\n", events); @@ -1911,7 +1911,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) rps->pm_iir |= events; schedule_work(&rps->work); - spin_unlock(&gt->irq_lock); + spin_unlock(gt->irq_lock); } if (GRAPHICS_VER(gt->i915) >= 8) diff --git a/drivers/gpu/drm/i915/gt/intel_sa_media.c b/drivers/gpu/drm/i915/gt/intel_sa_media.c new file mode 100644 index 000000000000..e8f3d18c12b8 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_sa_media.c @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include <drm/drm_managed.h> + +#include "i915_drv.h" +#include "gt/intel_gt.h" +#include "gt/intel_sa_media.h" + +int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr, + u32 gsi_offset) +{ + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore; + + uncore = drmm_kzalloc(&i915->drm, sizeof(*uncore), GFP_KERNEL); + if (!uncore) + return -ENOMEM; + + uncore->gsi_offset = gsi_offset; + + gt->irq_lock = to_gt(i915)->irq_lock; + intel_gt_common_init_early(gt); + intel_uncore_init_early(uncore, gt); + + /* + * Standalone media shares the general MMIO space with the primary + * GT. We'll re-use the primary GT's mapping. + */ + uncore->regs = i915->uncore.regs; + if (drm_WARN_ON(&i915->drm, uncore->regs == NULL)) + return -EIO; + + gt->uncore = uncore; + gt->phys_addr = phys_addr; + + /* + * For current platforms we can assume there's only a single + * media GT and cache it for quick lookup.
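A small standalone sketch of how the gsi_offset biasing set up here plays out; 0x1234 is an arbitrary example offset, not a real register:

#include <stdint.h>
#include <stdio.h>

#define MTL_MEDIA_GSI_BASE 0x380000u	/* matches intel_gt_regs.h above */

/* Toy uncore: the media GT reuses the primary MMIO mapping and simply
 * biases every GT-relative register offset by its GSI base. */
struct toy_uncore {
	uint32_t gsi_offset;	/* 0 for the primary GT */
};

static uint32_t toy_reg_addr(const struct toy_uncore *uncore, uint32_t reg)
{
	return reg + uncore->gsi_offset;
}

int main(void)
{
	struct toy_uncore primary = { 0 };
	struct toy_uncore media = { MTL_MEDIA_GSI_BASE };

	printf("primary %#x, media %#x\n",
	       toy_reg_addr(&primary, 0x1234), toy_reg_addr(&media, 0x1234));
	return 0;
}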
+ */ + drm_WARN_ON(&i915->drm, i915->media_gt); + i915->media_gt = gt; + + return 0; +} diff --git a/drivers/gpu/drm/i915/gt/intel_sa_media.h b/drivers/gpu/drm/i915/gt/intel_sa_media.h new file mode 100644 index 000000000000..3afb310de932 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_sa_media.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ +#ifndef __INTEL_SA_MEDIA__ +#define __INTEL_SA_MEDIA__ + +#include <linux/types.h> + +struct intel_gt; + +int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr, + u32 gsi_offset); + +#endif /* __INTEL_SA_MEDIA__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index c6d3050604c8..66f21c735d54 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -382,7 +382,6 @@ static void cherryview_sseu_info_init(struct intel_gt *gt) static void gen9_sseu_info_init(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; - struct intel_device_info *info = mkwrite_device_info(i915); struct sseu_dev_info *sseu = &gt->info.sseu; struct intel_uncore *uncore = gt->uncore; u32 fuse2, eu_disable, subslice_mask; @@ -471,10 +470,10 @@ static void gen9_sseu_info_init(struct intel_gt *gt) if (IS_GEN9_LP(i915)) { #define IS_SS_DISABLED(ss) (!(sseu->subslice_mask.hsw[0] & BIT(ss))) - info->has_pooled_eu = hweight8(sseu->subslice_mask.hsw[0]) == 3; + RUNTIME_INFO(i915)->has_pooled_eu = hweight8(sseu->subslice_mask.hsw[0]) == 3; sseu->min_eu_in_pool = 0; - if (info->has_pooled_eu) { + if (HAS_POOLED_EU(i915)) { if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0)) sseu->min_eu_in_pool = 3; else if (IS_SS_DISABLED(1)) diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 1109088fe8f6..82d3f8058995 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -27,6 +27,9 @@ #define NUM_GPR 16 #define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */ +#define LRI_HEADER MI_INSTR(0x22, 0) +#define LRI_LENGTH_MASK GENMASK(7, 0) + static struct i915_vma *create_scratch(struct intel_gt *gt) { return __vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE); } @@ -202,7 +205,7 @@ static int live_lrc_layout(void *arg) continue; } - if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) { + if ((lri & GENMASK(31, 23)) != LRI_HEADER) { pr_err("%s: Expected LRI command at dword %d, found %08x\n", engine->name, dw, lri); err = -EINVAL; @@ -357,6 +360,11 @@ static int live_lrc_fixed(void *arg) lrc_ring_cmd_buf_cctl(engine), "RING_CMD_BUF_CCTL" }, + { + i915_mmio_reg_offset(RING_BB_OFFSET(engine->mmio_base)), + lrc_ring_bb_offset(engine), + "RING_BB_OFFSET" + }, { }, }, *t; u32 *hw; @@ -987,18 +995,40 @@ store_context(struct intel_context *ce, struct i915_vma *scratch) hw = defaults; hw += LRC_STATE_OFFSET / sizeof(*hw); do { - u32 len = hw[dw] & 0x7f; + u32 len = hw[dw] & LRI_LENGTH_MASK; + + /* + * Keep it simple, skip parsing complex commands + * + * At present, there are no more MI_LOAD_REGISTER_IMM + * commands after the first 3D state command. Rather + * than include a table (see i915_cmd_parser.c) of all + * the possible commands and their instruction lengths + * (or mask for variable length instructions), assume + * we have gathered the complete list of registers and + * bail out.
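The LRI walk these selftests perform can be exercised standalone; the context image below is made up, but the header and length encoding match the masks defined above (an MI_LOAD_REGISTER_IMM of n registers carries a length field of 2*n - 1):

#include <stdint.h>
#include <stdio.h>

#define LRI_HEADER_	 (0x22u << 23)	/* MI_INSTR(0x22, 0) */
#define LRI_OPCODE_MASK	 0xff800000u	/* GENMASK(31, 23) */
#define LRI_LEN_MASK	 0xffu		/* GENMASK(7, 0) */

static void walk_lri(const uint32_t *hw, unsigned int n)
{
	unsigned int dw = 0;

	while (dw < n) {
		uint32_t len = hw[dw] & LRI_LEN_MASK;

		if (hw[dw] == 0) {			/* noop padding */
			dw++;
			continue;
		}
		if ((hw[dw] & LRI_OPCODE_MASK) != LRI_HEADER_) {
			dw += len + 2;			/* skip other commands */
			continue;
		}

		dw++;
		len = (len + 1) / 2;	/* dwords -> (offset, value) pairs */
		while (len--) {
			printf("reg %#06x <- %#x\n", hw[dw], hw[dw + 1]);
			dw += 2;
		}
	}
}

int main(void)
{
	/* LRI of two registers: length field = 2*2 - 1 = 3 */
	const uint32_t image[] = { LRI_HEADER_ | 3, 0x2244, 0x1, 0x2248, 0x2, 0 };

	walk_lri(image, sizeof(image) / sizeof(image[0]));
	return 0;
}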
+ */ + if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT) + break; if (hw[dw] == 0) { dw++; continue; } - if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) { + if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) { + /* Assume all other MI commands match LRI length mask */ dw += len + 2; continue; } + if (!len) { + pr_err("%s: invalid LRI found in context image\n", + ce->engine->name); + igt_hexdump(defaults, PAGE_SIZE); + break; + } + dw++; len = (len + 1) / 2; while (len--) { @@ -1150,18 +1180,29 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison) hw = defaults; hw += LRC_STATE_OFFSET / sizeof(*hw); do { - u32 len = hw[dw] & 0x7f; + u32 len = hw[dw] & LRI_LENGTH_MASK; + + /* For simplicity, break parsing at the first complex command */ + if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT) + break; if (hw[dw] == 0) { dw++; continue; } - if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) { + if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) { dw += len + 2; continue; } + if (!len) { + pr_err("%s: invalid LRI found in context image\n", + ce->engine->name); + igt_hexdump(defaults, PAGE_SIZE); + break; + } + dw++; len = (len + 1) / 2; *cs++ = MI_LOAD_REGISTER_IMM(len); @@ -1292,18 +1333,29 @@ static int compare_isolation(struct intel_engine_cs *engine, hw = defaults; hw += LRC_STATE_OFFSET / sizeof(*hw); do { - u32 len = hw[dw] & 0x7f; + u32 len = hw[dw] & LRI_LENGTH_MASK; + + /* For simplicity, break parsing at the first complex command */ + if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT) + break; if (hw[dw] == 0) { dw++; continue; } - if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) { + if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) { dw += len + 2; continue; } + if (!len) { + pr_err("%s: invalid LRI found in context image\n", + engine->name); + igt_hexdump(defaults, PAGE_SIZE); + break; + } + dw++; len = (len + 1) / 2; while (len--) { @@ -1343,6 +1395,30 @@ err_A0: return err; } +static struct i915_vma * +create_result_vma(struct i915_address_space *vm, unsigned long sz) +{ + struct i915_vma *vma; + void *ptr; + + vma = create_user_vma(vm, sz); + if (IS_ERR(vma)) + return vma; + + /* Set the results to a known value distinct from the poison */ + ptr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC); + if (IS_ERR(ptr)) { + i915_vma_put(vma); + return ERR_CAST(ptr); + } + + memset(ptr, POISON_INUSE, vma->size); + i915_gem_object_flush_map(vma->obj); + i915_gem_object_unpin_map(vma->obj); + + return vma; +} + static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison) { u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1); @@ -1361,13 +1437,13 @@ static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison) goto err_A; } - ref[0] = create_user_vma(A->vm, SZ_64K); + ref[0] = create_result_vma(A->vm, SZ_64K); if (IS_ERR(ref[0])) { err = PTR_ERR(ref[0]); goto err_B; } - ref[1] = create_user_vma(A->vm, SZ_64K); + ref[1] = create_result_vma(A->vm, SZ_64K); if (IS_ERR(ref[1])) { err = PTR_ERR(ref[1]); goto err_ref0; @@ -1389,13 +1465,13 @@ static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison) } i915_request_put(rq); - result[0] = create_user_vma(A->vm, SZ_64K); + result[0] = create_result_vma(A->vm, SZ_64K); if (IS_ERR(result[0])) { err = PTR_ERR(result[0]); goto err_ref1; } - result[1] = create_user_vma(A->vm, SZ_64K); + result[1] = create_result_vma(A->vm, SZ_64K); if (IS_ERR(result[1])) { err = PTR_ERR(result[1]); goto err_result0; @@ -1408,18 +1484,17 @@ static int __lrc_isolation(struct intel_engine_cs 
*engine, u32 poison) } err = poison_registers(B, poison, sema); - if (err) { - WRITE_ONCE(*sema, -1); - i915_request_put(rq); - goto err_result1; - } - - if (i915_request_wait(rq, 0, HZ / 2) < 0) { - i915_request_put(rq); + if (err == 0 && i915_request_wait(rq, 0, HZ / 2) < 0) { + pr_err("%s(%s): wait for results timed out\n", + __func__, engine->name); err = -ETIME; - goto err_result1; } + + /* Always cancel the semaphore wait, just in case the GPU gets stuck */ + WRITE_ONCE(*sema, -1); i915_request_put(rq); + if (err) + goto err_result1; err = compare_isolation(engine, ref, result, A, poison); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 24451d000a6a..bac06e3d6f2c 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -82,9 +82,9 @@ static void gen9_reset_guc_interrupts(struct intel_guc *guc) assert_rpm_wakelock_held(&gt->i915->runtime_pm); - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); gen6_gt_pm_reset_iir(gt, gt->pm_guc_events); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); } static void gen9_enable_guc_interrupts(struct intel_guc *guc) @@ -93,11 +93,11 @@ static void gen9_enable_guc_interrupts(struct intel_guc *guc) assert_rpm_wakelock_held(&gt->i915->runtime_pm); - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) & gt->pm_guc_events); gen6_gt_pm_enable_irq(gt, gt->pm_guc_events); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); } static void gen9_disable_guc_interrupts(struct intel_guc *guc) @@ -106,11 +106,11 @@ static void gen9_disable_guc_interrupts(struct intel_guc *guc) assert_rpm_wakelock_held(&gt->i915->runtime_pm); - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); gen6_gt_pm_disable_irq(gt, gt->pm_guc_events); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); intel_synchronize_irq(gt->i915); gen9_reset_guc_interrupts(guc); @@ -120,9 +120,9 @@ static void gen11_reset_guc_interrupts(struct intel_guc *guc) { struct intel_gt *gt = guc_to_gt(guc); - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); gen11_gt_reset_one_iir(gt, 0, GEN11_GUC); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); } static void gen11_enable_guc_interrupts(struct intel_guc *guc) @@ -130,25 +130,25 @@ static void gen11_enable_guc_interrupts(struct intel_guc *guc) struct intel_gt *gt = guc_to_gt(guc); u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST); - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC)); intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events); intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); } static void gen11_disable_guc_interrupts(struct intel_guc *guc) { struct intel_gt *gt = guc_to_gt(guc); - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0); intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); intel_synchronize_irq(gt->i915); gen11_reset_guc_interrupts(guc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 64c4e83153f4..22ba66e48a9b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1438,
+1438,12 @@ void intel_guc_busyness_park(struct intel_gt *gt) if (!guc_submission_initialized(guc)) return; - cancel_delayed_work(&guc->timestamp.work); + /* + * There is a race with suspend flow where the worker runs after suspend + * and causes an unclaimed register access warning. Cancel the worker + * synchronously here. + */ + cancel_delayed_work_sync(&guc->timestamp.work); /* * Before parking, we should sample engine busyness stats if we need to. @@ -1532,8 +1537,8 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc) __reset_guc_busyness_stats(guc); /* Flush IRQ handler */ - spin_lock_irq(&guc_to_gt(guc)->irq_lock); - spin_unlock_irq(&guc_to_gt(guc)->irq_lock); + spin_lock_irq(guc_to_gt(guc)->irq_lock); + spin_unlock_irq(guc_to_gt(guc)->irq_lock); guc_flush_submissions(guc); guc_flush_destroyed_contexts(guc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index abf4e142596d..dbd048b77e19 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -245,9 +245,9 @@ static int guc_enable_communication(struct intel_guc *guc) intel_guc_enable_interrupts(guc); /* check for CT messages received before we enabled interrupts */ - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); intel_guc_ct_event_handler(&guc->ct); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); drm_dbg(&i915->drm, "GuC communication enabled\n"); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index af425916cdf6..b91ad4aede1f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -72,12 +72,14 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, * security fixes, etc. to be enabled. */ #define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_maj, guc_mmp) \ - fw_def(DG2, 0, guc_mmp(dg2, 70, 4, 1)) \ + fw_def(DG2, 0, guc_maj(dg2, 70, 5)) \ + fw_def(ALDERLAKE_P, 0, guc_maj(adlp, 70, 5)) \ fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 70, 1, 1)) \ fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 69, 0, 3)) \ + fw_def(ALDERLAKE_S, 0, guc_maj(tgl, 70, 5)) \ fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 70, 1, 1)) \ fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 69, 0, 3)) \ - fw_def(DG1, 0, guc_mmp(dg1, 70, 1, 1)) \ + fw_def(DG1, 0, guc_maj(dg1, 70, 5)) \ fw_def(ROCKETLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \ fw_def(TIGERLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \ fw_def(JASPERLAKE, 0, guc_mmp(ehl, 70, 1, 1)) \ @@ -92,9 +94,11 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, fw_def(SKYLAKE, 0, guc_mmp(skl, 70, 1, 1)) #define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_raw, huc_mmp) \ + fw_def(ALDERLAKE_P, 0, huc_raw(tgl)) \ fw_def(ALDERLAKE_P, 0, huc_mmp(tgl, 7, 9, 3)) \ + fw_def(ALDERLAKE_S, 0, huc_raw(tgl)) \ fw_def(ALDERLAKE_S, 0, huc_mmp(tgl, 7, 9, 3)) \ - fw_def(DG1, 0, huc_mmp(dg1, 7, 9, 3)) \ + fw_def(DG1, 0, huc_raw(dg1)) \ fw_def(ROCKETLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \ fw_def(TIGERLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \ fw_def(JASPERLAKE, 0, huc_mmp(ehl, 9, 0, 0)) \ @@ -232,6 +236,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw) u32 fw_count; u8 rev = INTEL_REVID(i915); int i; + bool found; /* * The only difference between the ADL GuC FWs is the HWConfig support.
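The intel_uc_fw.c changes that continue below amount to a fetch-with-fallback loop; the following condensed sketch is an illustration only (identifiers taken from the surrounding hunks, error handling trimmed), not the literal kernel code:

	err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
	/* Any error is terminal if the user forced a specific blob */
	if (err && intel_uc_fw_is_overridden(uc_fw))
		goto fail;
	while (err == -ENOENT) {
		/* Anything found from here on predates the preferred blob */
		old_ver = true;
		/* Select the next older blob, or clear file_selected.path */
		__uc_fw_auto_select(i915, uc_fw);
		if (!uc_fw->file_selected.path)
			break; /* nothing left to try; report the failure */
		err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
	}

The new found flag in __uc_fw_auto_select() is what makes the repeated calls terminate: when no further match exists for the previously selected path, the path is cleared and the loop gives up.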
@@ -246,6 +251,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw) fw_blobs = blobs_all[uc_fw->type].blobs; fw_count = blobs_all[uc_fw->type].count; + found = false; for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) { const struct uc_fw_blob *blob = &fw_blobs[i].blob; @@ -266,9 +272,15 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw) uc_fw->file_wanted.path = blob->path; uc_fw->file_wanted.major_ver = blob->major; uc_fw->file_wanted.minor_ver = blob->minor; + found = true; break; } + if (!found && uc_fw->file_selected.path) { + /* Failed to find a match for the last attempt?! */ + uc_fw->file_selected.path = NULL; + } + /* make sure the list is ordered as expected */ if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST) && !verified) { verified = true; @@ -322,7 +334,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw) continue; bad: - drm_err(&i915->drm, "\x1B[35;1mInvalid FW blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n", + drm_err(&i915->drm, "Invalid FW blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n", intel_platform_name(fw_blobs[i - 1].p), fw_blobs[i - 1].rev, fw_blobs[i - 1].blob.legacy ? "L" : "v", fw_blobs[i - 1].blob.major, @@ -553,10 +565,14 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev); memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal)); - if (!err || intel_uc_fw_is_overridden(uc_fw)) - goto done; + + /* Any error is terminal if overriding. Don't bother searching for older versions */ + if (err && intel_uc_fw_is_overridden(uc_fw)) + goto fail; while (err == -ENOENT) { + old_ver = true; + __uc_fw_auto_select(i915, uc_fw); if (!uc_fw->file_selected.path) { /* @@ -576,8 +592,6 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) if (err) goto fail; - old_ver = true; -done: if (uc_fw->loaded_via_gsc) err = check_gsc_manifest(fw, uc_fw); else diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index dad3a6054335..eef3bba8a41b 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -33,6 +33,7 @@ #include "i915_drv.h" #include "gvt.h" +#include "intel_pci_config.h" enum { INTEL_GVT_PCI_BAR_GTTMMIO = 0, @@ -353,9 +354,9 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size = - pci_resource_len(pdev, 0); + pci_resource_len(pdev, GTTMMADR_BAR); vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size = - pci_resource_len(pdev, 2); + pci_resource_len(pdev, GTT_APERTURE_BAR); memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4); diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index a30ba2d7b7ba..1b509c1a1e33 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -32,9 +32,10 @@ * */ +#include "display/intel_gmbus_regs.h" +#include "gvt.h" #include "i915_drv.h" #include "i915_reg.h" -#include "gvt.h" #define GMBUS1_TOTAL_BYTES_SHIFT 16 #define GMBUS1_TOTAL_BYTES_MASK 0x1ff diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index beea5895e499..e7e33f95cc51 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -498,7 +498,7 @@ static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port) switch (wrpll_ctl & WRPLL_REF_MASK) { case WRPLL_REF_PCH_SSC: - refclk = 
vgpu->gvt->gt->i915->dpll.ref_clks.ssc; + refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.ssc; break; case WRPLL_REF_LCPLL: refclk = 2700000; @@ -529,7 +529,7 @@ out: static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port) { u32 dp_br = 0; - int refclk = vgpu->gvt->gt->i915->dpll.ref_clks.nssc; + int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc; enum dpio_phy phy = DPIO_PHY0; enum dpio_channel ch = DPIO_CH0; struct dpll clock = {0}; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 3a8450058548..ae987e92251d 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -66,8 +66,7 @@ static int i915_capabilities(struct seq_file *m, void *data) seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915)); - intel_device_info_print_static(INTEL_INFO(i915), &p); - intel_device_info_print_runtime(RUNTIME_INFO(i915), &p); + intel_device_info_print(INTEL_INFO(i915), RUNTIME_INFO(i915), &p); i915_print_iommu_status(i915, &p); intel_gt_info_print(&to_gt(i915)->info, &p); intel_driver_caps_print(&i915->caps, &p); @@ -411,7 +410,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data) seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", swizzle_string(to_gt(dev_priv)->ggtt->bit_6_swizzle_y)); - if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) + if (dev_priv->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) seq_puts(m, "L-shaped memory detected\n"); /* On BDW+, swizzling is not used. See detect_bit_6_swizzle() */ @@ -493,7 +492,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused) seq_puts(m, "Runtime power management not supported\n"); seq_printf(m, "Runtime power status: %s\n", - str_enabled_disabled(!dev_priv->power_domains.init_wakeref)); + str_enabled_disabled(!dev_priv->display.power.domains.init_wakeref)); seq_printf(m, "GPU idle: %s\n", str_yes_no(!to_gt(dev_priv)->awake)); seq_printf(m, "IRQs disabled: %s\n", diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index deb8a8b76965..c459eb362c47 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -61,6 +61,7 @@ #include "display/intel_pps.h" #include "display/intel_sprite.h" #include "display/intel_vga.h" +#include "display/skl_watermark.h" #include "gem/i915_gem_context.h" #include "gem/i915_gem_create.h" @@ -105,6 +106,12 @@ static const char irst_name[] = "INT3392"; static const struct drm_driver i915_drm_driver; +static void i915_release_bridge_dev(struct drm_device *dev, + void *bridge) +{ + pci_dev_put(bridge); +} + static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) { int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus); @@ -115,7 +122,9 @@ static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) drm_err(&dev_priv->drm, "bridge device not found\n"); return -EIO; } - return 0; + + return drmm_add_action_or_reset(&dev_priv->drm, i915_release_bridge_dev, + dev_priv->bridge_dev); } /* Allocate space for the MCH regs if needed, return nonzero on error */ @@ -252,8 +261,8 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv) if (dev_priv->wq == NULL) goto out_err; - dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0); - if (dev_priv->hotplug.dp_wq == NULL) + dev_priv->display.hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0); + if (dev_priv->display.hotplug.dp_wq == NULL) goto out_free_wq; return 0; @@ -268,7 +277,7 @@ out_err: static void i915_workqueues_cleanup(struct drm_i915_private 
*dev_priv) { - destroy_workqueue(dev_priv->hotplug.dp_wq); + destroy_workqueue(dev_priv->display.hotplug.dp_wq); destroy_workqueue(dev_priv->wq); } @@ -302,8 +311,13 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) static void sanitize_gpu(struct drm_i915_private *i915) { - if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) - __intel_gt_reset(to_gt(i915), ALL_ENGINES); + if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) { + struct intel_gt *gt; + unsigned int i; + + for_each_gt(gt, i915, i) + __intel_gt_reset(gt, ALL_ENGINES); + } } /** @@ -326,19 +340,19 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) intel_device_info_subplatform_init(dev_priv); intel_step_init(dev_priv); - intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug); + intel_uncore_mmio_debug_init_early(dev_priv); spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->gpu_error.lock); - mutex_init(&dev_priv->backlight_lock); + mutex_init(&dev_priv->display.backlight.lock); mutex_init(&dev_priv->sb_lock); cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE); - mutex_init(&dev_priv->audio.mutex); - mutex_init(&dev_priv->wm.wm_mutex); - mutex_init(&dev_priv->pps_mutex); - mutex_init(&dev_priv->hdcp_comp_mutex); + mutex_init(&dev_priv->display.audio.mutex); + mutex_init(&dev_priv->display.wm.wm_mutex); + mutex_init(&dev_priv->display.pps.mutex); + mutex_init(&dev_priv->display.hdcp.comp_mutex); i915_memcpy_init_early(dev_priv); intel_runtime_pm_init_early(&dev_priv->runtime_pm); @@ -357,7 +371,9 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) intel_wopcm_init_early(&dev_priv->wopcm); - intel_root_gt_init_early(dev_priv); + ret = intel_root_gt_init_early(dev_priv); + if (ret < 0) + goto err_rootgt; i915_drm_clients_init(&dev_priv->clients, dev_priv); @@ -382,6 +398,7 @@ err_gem: i915_gem_cleanup_early(dev_priv); intel_gt_driver_late_release_all(dev_priv); i915_drm_clients_fini(&dev_priv->clients); +err_rootgt: intel_region_ttm_device_fini(dev_priv); err_ttm: vlv_suspend_cleanup(dev_priv); @@ -423,7 +440,8 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv) */ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) { - int ret; + struct intel_gt *gt; + int ret, i; if (i915_inject_probe_failure(dev_priv)) return -ENODEV; @@ -432,17 +450,27 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) if (ret < 0) return ret; - ret = intel_uncore_init_mmio(&dev_priv->uncore); - if (ret) - return ret; + for_each_gt(gt, dev_priv, i) { + ret = intel_uncore_init_mmio(gt->uncore); + if (ret) + return ret; + + ret = drmm_add_action_or_reset(&dev_priv->drm, + intel_uncore_fini_mmio, + gt->uncore); + if (ret) + return ret; + } /* Try to make sure MCHBAR is enabled before poking at it */ intel_setup_mchbar(dev_priv); intel_device_info_runtime_init(dev_priv); - ret = intel_gt_init_mmio(to_gt(dev_priv)); - if (ret) - goto err_uncore; + for_each_gt(gt, dev_priv, i) { + ret = intel_gt_init_mmio(gt); + if (ret) + goto err_uncore; + } /* As early as possible, scrub existing GPU state before clobbering */ sanitize_gpu(dev_priv); @@ -451,8 +479,6 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) err_uncore: intel_teardown_mchbar(dev_priv); - intel_uncore_fini_mmio(&dev_priv->uncore); - pci_dev_put(dev_priv->bridge_dev); return ret; } @@ -464,8 +490,6 @@ err_uncore: static void i915_driver_mmio_release(struct drm_i915_private *dev_priv) { intel_teardown_mchbar(dev_priv); - 
intel_uncore_fini_mmio(&dev_priv->uncore); - pci_dev_put(dev_priv->bridge_dev); } /** @@ -715,6 +739,8 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) static void i915_driver_register(struct drm_i915_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; + struct intel_gt *gt; + unsigned int i; i915_gem_driver_register(dev_priv); i915_pmu_register(dev_priv); @@ -734,7 +760,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) /* Depends on sysfs having been initialized */ i915_perf_register(dev_priv); - intel_gt_driver_register(to_gt(dev_priv)); + for_each_gt(gt, dev_priv, i) + intel_gt_driver_register(gt); intel_display_driver_register(dev_priv); @@ -753,6 +780,9 @@ */ static void i915_driver_unregister(struct drm_i915_private *dev_priv) { + struct intel_gt *gt; + unsigned int i; + i915_switcheroo_unregister(dev_priv); intel_unregister_dsm_handler(); @@ -762,7 +792,8 @@ intel_display_driver_unregister(dev_priv); - intel_gt_driver_unregister(to_gt(dev_priv)); + for_each_gt(gt, dev_priv, i) + intel_gt_driver_unregister(gt); i915_perf_unregister(dev_priv); i915_pmu_unregister(dev_priv); @@ -784,6 +815,8 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv) { if (drm_debug_enabled(DRM_UT_DRIVER)) { struct drm_printer p = drm_debug_printer("i915 device info:"); + struct intel_gt *gt; + unsigned int i; drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n", INTEL_DEVID(dev_priv), @@ -793,10 +826,11 @@ INTEL_INFO(dev_priv)->platform), GRAPHICS_VER(dev_priv)); - intel_device_info_print_static(INTEL_INFO(dev_priv), &p); - intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p); + intel_device_info_print(INTEL_INFO(dev_priv), + RUNTIME_INFO(dev_priv), &p); i915_print_iommu_status(dev_priv, &p); - intel_gt_info_print(&to_gt(dev_priv)->info, &p); + for_each_gt(gt, dev_priv, i) + intel_gt_info_print(&gt->info, &p); } if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) @@ -814,6 +848,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) const struct intel_device_info *match_info = (struct intel_device_info *)ent->driver_data; struct intel_device_info *device_info; + struct intel_runtime_info *runtime; struct drm_i915_private *i915; i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver, @@ -829,7 +864,11 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) /* Setup the write-once "constant" device info */ device_info = mkwrite_device_info(i915); memcpy(device_info, match_info, sizeof(*device_info)); - RUNTIME_INFO(i915)->device_id = pdev->device; + + /* Initialize initial runtime info from static const data and pdev.
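The static struct intel_device_info stays write-once const after this point; anything the driver may adjust at probe time (fuse-dependent features such as has_pooled_eu, vGPU restrictions on page_sizes, the graphics/media/display IP versions) now lives in intel_runtime_info, seeded here from the static __runtime template and refined later by intel_device_info_runtime_init().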
*/ + runtime = RUNTIME_INFO(i915); + memcpy(runtime, &INTEL_INFO(i915)->__runtime, sizeof(*runtime)); + runtime->device_id = pdev->device; return i915; } @@ -948,7 +987,9 @@ out_fini: void i915_driver_remove(struct drm_i915_private *i915) { - disable_rpm_wakeref_asserts(&i915->runtime_pm); + intel_wakeref_t wakeref; + + wakeref = intel_runtime_pm_get(&i915->runtime_pm); i915_driver_unregister(i915); @@ -972,18 +1013,19 @@ void i915_driver_remove(struct drm_i915_private *i915) i915_driver_hw_remove(i915); - enable_rpm_wakeref_asserts(&i915->runtime_pm); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); } static void i915_driver_release(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; + intel_wakeref_t wakeref; if (!dev_priv->do_release) return; - disable_rpm_wakeref_asserts(rpm); + wakeref = intel_runtime_pm_get(rpm); i915_gem_driver_release(dev_priv); @@ -994,7 +1036,8 @@ static void i915_driver_release(struct drm_device *dev) i915_driver_mmio_release(dev_priv); - enable_rpm_wakeref_asserts(rpm); + intel_runtime_pm_put(rpm, wakeref); + intel_runtime_pm_driver_release(rpm); i915_driver_late_release(dev_priv); @@ -1206,13 +1249,15 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; - int ret; + struct intel_gt *gt; + int ret, i; disable_rpm_wakeref_asserts(rpm); i915_gem_suspend_late(dev_priv); - intel_uncore_suspend(&dev_priv->uncore); + for_each_gt(gt, dev_priv, i) + intel_uncore_suspend(gt->uncore); intel_power_domains_suspend(dev_priv, get_suspend_mode(dev_priv, hibernation)); @@ -1344,7 +1389,8 @@ static int i915_drm_resume_early(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); - int ret; + struct intel_gt *gt; + int ret, i; /* * We have a resume ordering issue with the snd-hda driver also @@ -1398,9 +1444,10 @@ static int i915_drm_resume_early(struct drm_device *dev) drm_err(&dev_priv->drm, "Resume prepare failed: %d, continuing anyway\n", ret); - intel_uncore_resume_early(&dev_priv->uncore); - - intel_gt_check_and_clear_faults(to_gt(dev_priv)); + for_each_gt(gt, dev_priv, i) { + intel_uncore_resume_early(gt->uncore); + intel_gt_check_and_clear_faults(gt); + } intel_display_power_resume_early(dev_priv); @@ -1580,7 +1627,8 @@ static int intel_runtime_suspend(struct device *kdev) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; - int ret; + struct intel_gt *gt; + int ret, i; if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) return -ENODEV; @@ -1595,11 +1643,13 @@ static int intel_runtime_suspend(struct device *kdev) */ i915_gem_runtime_suspend(dev_priv); - intel_gt_runtime_suspend(to_gt(dev_priv)); + for_each_gt(gt, dev_priv, i) + intel_gt_runtime_suspend(gt); intel_runtime_pm_disable_interrupts(dev_priv); - intel_uncore_suspend(&dev_priv->uncore); + for_each_gt(gt, dev_priv, i) + intel_uncore_suspend(gt->uncore); intel_display_power_suspend(dev_priv); @@ -1663,7 +1713,8 @@ static int intel_runtime_resume(struct device *kdev) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; - int ret; + struct intel_gt *gt; + int ret, i; if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) return -ENODEV; @@ 
-1683,7 +1734,8 @@ static int intel_runtime_resume(struct device *kdev) ret = vlv_resume_prepare(dev_priv, true); - intel_uncore_runtime_resume(&dev_priv->uncore); + for_each_gt(gt, dev_priv, i) + intel_uncore_runtime_resume(gt->uncore); intel_runtime_pm_enable_interrupts(dev_priv); @@ -1691,7 +1743,8 @@ static int intel_runtime_resume(struct device *kdev) * No point of rolling back things in case of an error, as the best * we can do is to hope that things will still work (and disable RPM). */ - intel_gt_runtime_resume(to_gt(dev_priv)); + for_each_gt(gt, dev_priv, i) + intel_gt_runtime_resume(gt); /* * On VLV/CHV display interrupts are part of the display @@ -1703,7 +1756,7 @@ static int intel_runtime_resume(struct device *kdev) intel_hpd_poll_disable(dev_priv); } - intel_enable_ipc(dev_priv); + skl_watermark_ipc_update(dev_priv); enable_rpm_wakeref_asserts(rpm); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7c0a34a33fec..9f9372931fd2 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -34,20 +34,10 @@ #include <linux/pm_qos.h> -#include <drm/drm_connector.h> #include <drm/ttm/ttm_device.h> -#include "display/intel_cdclk.h" #include "display/intel_display.h" -#include "display/intel_display_power.h" -#include "display/intel_dmc.h" -#include "display/intel_dpll_mgr.h" -#include "display/intel_dsb.h" -#include "display/intel_fbc.h" -#include "display/intel_frontbuffer.h" -#include "display/intel_global_state.h" -#include "display/intel_gmbus.h" -#include "display/intel_opregion.h" +#include "display/intel_display_core.h" #include "gem/i915_gem_context_types.h" #include "gem/i915_gem_lmem.h" @@ -70,80 +60,24 @@ #include "intel_device_info.h" #include "intel_memory_region.h" #include "intel_pch.h" -#include "intel_pm_types.h" #include "intel_runtime_pm.h" #include "intel_step.h" #include "intel_uncore.h" #include "intel_wopcm.h" -struct dpll; struct drm_i915_clock_gating_funcs; struct drm_i915_gem_object; struct drm_i915_private; -struct intel_atomic_state; -struct intel_audio_funcs; -struct intel_cdclk_config; -struct intel_cdclk_funcs; -struct intel_cdclk_state; -struct intel_cdclk_vals; -struct intel_color_funcs; struct intel_connector; -struct intel_crtc; struct intel_dp; -struct intel_dpll_funcs; struct intel_encoder; -struct intel_fbdev; -struct intel_fdi_funcs; -struct intel_gmbus; -struct intel_hotplug_funcs; -struct intel_initial_plane_config; struct intel_limit; -struct intel_overlay; struct intel_overlay_error_state; struct vlv_s0ix_state; /* Threshold == 5 for long IRQs, 50 for short */ #define HPD_STORM_DEFAULT_THRESHOLD 50 -struct i915_hotplug { - struct delayed_work hotplug_work; - - const u32 *hpd, *pch_hpd; - - struct { - unsigned long last_jiffies; - int count; - enum { - HPD_ENABLED = 0, - HPD_DISABLED = 1, - HPD_MARK_DISABLED = 2 - } state; - } stats[HPD_NUM_PINS]; - u32 event_bits; - u32 retry_bits; - struct delayed_work reenable_work; - - u32 long_port_mask; - u32 short_port_mask; - struct work_struct dig_port_work; - - struct work_struct poll_init_work; - bool poll_enabled; - - unsigned int hpd_storm_threshold; - /* Whether or not to count short HPD IRQs in HPD storms */ - u8 hpd_short_storm_enabled; - - /* - * if we get a HPD irq from DP and a HPD irq from non-DP - * the non-DP HPD could block the workqueue on a mode config - * mutex getting, that userspace may have taken. However - * userspace is waiting on the DP workqueue to run which is - * blocked behind the non-DP one. 
- */ - struct workqueue_struct *dp_wq; -}; - #define I915_GEM_GPU_DOMAINS \ (I915_GEM_DOMAIN_RENDER | \ I915_GEM_DOMAIN_SAMPLER | \ @@ -151,55 +85,9 @@ struct i915_hotplug { I915_GEM_DOMAIN_INSTRUCTION | \ I915_GEM_DOMAIN_VERTEX) -struct sdvo_device_mapping { - u8 initialized; - u8 dvo_port; - u8 slave_addr; - u8 dvo_wiring; - u8 i2c_pin; - u8 ddc_pin; -}; - -/* functions used for watermark calcs for display. */ -struct drm_i915_wm_disp_funcs { - /* update_wm is for legacy wm management */ - void (*update_wm)(struct drm_i915_private *dev_priv); - int (*compute_pipe_wm)(struct intel_atomic_state *state, - struct intel_crtc *crtc); - int (*compute_intermediate_wm)(struct intel_atomic_state *state, - struct intel_crtc *crtc); - void (*initial_watermarks)(struct intel_atomic_state *state, - struct intel_crtc *crtc); - void (*atomic_update_watermarks)(struct intel_atomic_state *state, - struct intel_crtc *crtc); - void (*optimize_watermarks)(struct intel_atomic_state *state, - struct intel_crtc *crtc); - int (*compute_global_watermarks)(struct intel_atomic_state *state); -}; - -struct drm_i915_display_funcs { - /* Returns the active state of the crtc, and if the crtc is active, - * fills out the pipe-config with the hw state. */ - bool (*get_pipe_config)(struct intel_crtc *, - struct intel_crtc_state *); - void (*get_initial_plane_config)(struct intel_crtc *, - struct intel_initial_plane_config *); - void (*crtc_enable)(struct intel_atomic_state *state, - struct intel_crtc *crtc); - void (*crtc_disable)(struct intel_atomic_state *state, - struct intel_crtc *crtc); - void (*commit_modeset_enables)(struct intel_atomic_state *state); -}; - #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */ -#define QUIRK_LVDS_SSC_DISABLE (1<<1) -#define QUIRK_INVERT_BRIGHTNESS (1<<2) -#define QUIRK_BACKLIGHT_PRESENT (1<<3) -#define QUIRK_PIN_SWIZZLED_PAGES (1<<5) -#define QUIRK_INCREASE_T12_DELAY (1<<6) -#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7) -#define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8) +#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0) struct i915_suspend_saved_registers { u32 saveDSPARB; @@ -289,51 +177,8 @@ i915_fence_timeout(const struct drm_i915_private *i915) return i915_fence_context_timeout(i915, U64_MAX); } -/* Amount of SAGV/QGV points, BSpec precisely defines this */ -#define I915_NUM_QGV_POINTS 8 - #define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915)) -/* Amount of PSF GV points, BSpec precisely defines this */ -#define I915_NUM_PSF_GV_POINTS 3 - -struct intel_vbt_data { - /* bdb version */ - u16 version; - - /* Feature bits */ - unsigned int int_tv_support:1; - unsigned int int_crt_support:1; - unsigned int lvds_use_ssc:1; - unsigned int int_lvds_support:1; - unsigned int display_clock_mode:1; - unsigned int fdi_rx_polarity_inverted:1; - int lvds_ssc_freq; - enum drm_panel_orientation orientation; - - bool override_afc_startup; - u8 override_afc_startup_val; - - int crt_ddc_pin; - - struct list_head display_devices; - struct list_head bdb_blocks; - - struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */ - struct sdvo_device_mapping sdvo_mappings[2]; -}; - -struct i915_frontbuffer_tracking { - spinlock_t lock; - - /* - * Tracking bits for delayed frontbuffer flushing du to gpu activity or - * scheduled flips. 
- */ - unsigned busy_bits; - unsigned flip_bits; -}; - struct i915_virtual_gpu { struct mutex lock; /* serialises sending of g2v_notify command pkts */ bool active; @@ -348,32 +193,11 @@ struct i915_selftest_stash { struct ida mock_region_instances; }; -/* intel_audio.c private */ -struct intel_audio_private { - /* Display internal audio functions */ - const struct intel_audio_funcs *funcs; - - /* hda/i915 audio component */ - struct i915_audio_component *component; - bool component_registered; - /* mutex for audio/video sync */ - struct mutex mutex; - int power_refcount; - u32 freq_cntrl; - - /* Used to save the pipe-to-encoder mapping for audio */ - struct intel_encoder *encoder_map[I915_MAX_PIPES]; - - /* necessary resource sharing with HDMI LPE audio driver. */ - struct { - struct platform_device *platdev; - int irq; - } lpe; -}; - struct drm_i915_private { struct drm_device drm; + struct intel_display display; + /* FIXME: Device release actions should all be moved to drmm_ */ bool do_release; @@ -417,27 +241,6 @@ struct drm_i915_private { struct intel_wopcm wopcm; - struct intel_dmc dmc; - - struct intel_gmbus *gmbus[GMBUS_NUM_PINS]; - - /** gmbus_mutex protects against concurrent usage of the single hw gmbus - * controller on different i2c buses. */ - struct mutex gmbus_mutex; - - /** - * Base address of where the gmbus and gpio blocks are located (either - * on PCH or on SoC for platforms without PCH). - */ - u32 gpio_mmio_base; - - /* MMIO base address for MIPI regs */ - u32 mipi_mmio_base; - - u32 pps_mmio_base; - - wait_queue_head_t gmbus_wait_queue; - struct pci_dev *bridge_dev; struct rb_root uabi_engines; @@ -461,48 +264,15 @@ struct drm_i915_private { }; u32 pipestat_irq_mask[I915_MAX_PIPES]; - struct i915_hotplug hotplug; - struct intel_fbc *fbc[I915_MAX_FBCS]; - struct intel_opregion opregion; - struct intel_vbt_data vbt; - bool preserve_bios_swizzle; - /* overlay */ - struct intel_overlay *overlay; - - /* backlight registers and fields in struct intel_panel */ - struct mutex backlight_lock; - - /* protects panel power sequencer state */ - struct mutex pps_mutex; - unsigned int fsb_freq, mem_freq, is_ddr3; unsigned int skl_preferred_vco_freq; - unsigned int max_cdclk_freq; unsigned int max_dotclk_freq; unsigned int hpll_freq; - unsigned int fdi_pll_freq; unsigned int czclk_freq; - struct { - /* The current hardware cdclk configuration */ - struct intel_cdclk_config hw; - - /* cdclk, divider, and ratio table from bspec */ - const struct intel_cdclk_vals *table; - - struct intel_global_obj obj; - } cdclk; - - struct { - /* The current hardware dbuf configuration */ - u8 enabled_slices; - - struct intel_global_obj obj; - } dbuf; - /** * wq - Driver workqueue for GEM. 
* @@ -512,40 +282,14 @@ struct drm_i915_private { */ struct workqueue_struct *wq; - /* ordered wq for modesets */ - struct workqueue_struct *modeset_wq; - /* unbound hipri wq for page flips/plane updates */ - struct workqueue_struct *flip_wq; - /* pm private clock gating functions */ const struct drm_i915_clock_gating_funcs *clock_gating_funcs; - /* pm display functions */ - const struct drm_i915_wm_disp_funcs *wm_disp; - - /* irq display functions */ - const struct intel_hotplug_funcs *hotplug_funcs; - - /* fdi display functions */ - const struct intel_fdi_funcs *fdi_funcs; - - /* display pll funcs */ - const struct intel_dpll_funcs *dpll_funcs; - - /* Display functions */ - const struct drm_i915_display_funcs *display; - - /* Display internal color functions */ - const struct intel_color_funcs *color_funcs; - - /* Display CDCLK functions */ - const struct intel_cdclk_funcs *cdclk_funcs; - /* PCH chipset type */ enum intel_pch pch_type; unsigned short pch_id; - unsigned long quirks; + unsigned long gem_quirks; struct drm_atomic_state *modeset_restore_state; struct drm_modeset_acquire_ctx reset_ctx; @@ -554,34 +298,8 @@ struct drm_i915_private { /* Kernel Modesetting */ - /** - * dpll and cdclk state is protected by connection_mutex - * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll. - * Must be global rather than per dpll, because on some platforms plls - * share registers. - */ - struct { - struct mutex lock; - - int num_shared_dpll; - struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; - const struct intel_dpll_mgr *mgr; - - struct { - int nssc; - int ssc; - } ref_clks; - } dpll; - struct list_head global_obj_list; - struct i915_frontbuffer_tracking fb_tracking; - - struct intel_atomic_helper { - struct llist_head free_list; - struct work_struct free_work; - } atomic_helper; - bool mchbar_need_disable; struct intel_l3_parity l3_parity; @@ -600,21 +318,8 @@ struct drm_i915_private { */ u32 edram_size_mb; - struct i915_power_domains power_domains; - struct i915_gpu_error gpu_error; - /* list of fbdev register on this device */ - struct intel_fbdev *fbdev; - struct work_struct fbdev_suspend_work; - - struct drm_property *broadcast_rgb_property; - struct drm_property *force_audio_property; - - u32 fdi_rx_config; - - /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */ - u32 chv_phy_control; /* * Shadows for CHV DPLL_MD regs to keep the state * checker somewhat working in the presence hardware @@ -627,51 +332,6 @@ struct drm_i915_private { struct i915_suspend_saved_registers regfile; struct vlv_s0ix_state *vlv_s0ix_state; - enum { - I915_SAGV_UNKNOWN = 0, - I915_SAGV_DISABLED, - I915_SAGV_ENABLED, - I915_SAGV_NOT_CONTROLLED - } sagv_status; - - u32 sagv_block_time_us; - - struct { - /* - * Raw watermark latency values: - * in 0.1us units for WM0, - * in 0.5us units for WM1+. - */ - /* primary */ - u16 pri_latency[5]; - /* sprite */ - u16 spr_latency[5]; - /* cursor */ - u16 cur_latency[5]; - /* - * Raw watermark memory latency values - * for SKL for all 8 levels - * in 1us units. - */ - u16 skl_latency[8]; - - /* current hardware state */ - union { - struct ilk_wm_values hw; - struct vlv_wm_values vlv; - struct g4x_wm_values g4x; - }; - - u8 max_level; - - /* - * Should be held around atomic WM register writing; also - * protects * intel_crtc->wm.active and - * crtc_state->wm.need_postvbl_update. 
- */ - struct mutex wm_mutex; - } wm; - struct dram_info { bool wm_lv_0_adjust_needed; u8 num_channels; @@ -689,18 +349,6 @@ struct drm_i915_private { u8 num_psf_gv_points; } dram_info; - struct intel_bw_info { - /* for each QGV point */ - unsigned int deratedbw[I915_NUM_QGV_POINTS]; - /* for each PSF GV point */ - unsigned int psf_bw[I915_NUM_PSF_GV_POINTS]; - u8 num_qgv_points; - u8 num_psf_gv_points; - u8 num_planes; - } max_bw[6]; - - struct intel_global_obj bw_obj; - struct intel_runtime_pm runtime_pm; struct i915_perf perf; @@ -716,6 +364,9 @@ struct drm_i915_private { struct kobject *sysfs_gt; + /* Quick lookup of media GT (current platforms only have one) */ + struct intel_gt *media_gt; + struct { struct i915_gem_contexts { spinlock_t lock; /* locks list */ @@ -733,9 +384,6 @@ struct drm_i915_private { struct file *mmap_singleton; } gem; - /* Window2 specifies time required to program DSB (Window2) in number of scan lines */ - u8 window2_delay; - u8 pch_ssc_use; /* For i915gm/i945gm vblank irq workaround */ @@ -743,31 +391,18 @@ struct drm_i915_private { bool irq_enabled; - union { - /* perform PHY state sanity checks? */ - bool chv_phy_assert[2]; - - /* - * DG2: Mask of PHYs that were not calibrated by the firmware - * and should not be used. - */ - u8 snps_phy_failed_calibration; - }; + /* + * DG2: Mask of PHYs that were not calibrated by the firmware + * and should not be used. + */ + u8 snps_phy_failed_calibration; bool ipc_enabled; - struct intel_audio_private audio; - struct i915_pmu pmu; struct i915_drm_clients clients; - struct i915_hdcp_comp_master *hdcp_master; - bool hdcp_comp_added; - - /* Mutex to protect the above hdcp component related values. */ - struct mutex hdcp_comp_mutex; - /* The TTM device structure. */ struct ttm_device bdev; @@ -826,28 +461,6 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915) (engine__) && (engine__)->uabi_class == (class__); \ (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node))) -#define I915_GTT_OFFSET_NONE ((u32)-1) - -/* - * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is - * considered to be the frontbuffer for the given plane interface-wise. This - * doesn't mean that the hw necessarily already scans it out, but that any - * rendering (by the cpu or gpu) will land in the frontbuffer eventually. - * - * We have one bit per pipe and per scanout plane type. 
- */ -#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 -#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \ - BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \ - BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \ - BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \ -}) -#define INTEL_FRONTBUFFER_OVERLAY(pipe) \ - BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) -#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ - GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \ - INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) - #define INTEL_INFO(dev_priv) (&(dev_priv)->__info) #define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime) #define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps) @@ -856,19 +469,19 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915) #define IP_VER(ver, rel) ((ver) << 8 | (rel)) -#define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics.ver) -#define GRAPHICS_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->graphics.ver, \ - INTEL_INFO(i915)->graphics.rel) +#define GRAPHICS_VER(i915) (RUNTIME_INFO(i915)->graphics.ip.ver) +#define GRAPHICS_VER_FULL(i915) IP_VER(RUNTIME_INFO(i915)->graphics.ip.ver, \ + RUNTIME_INFO(i915)->graphics.ip.rel) #define IS_GRAPHICS_VER(i915, from, until) \ (GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until)) -#define MEDIA_VER(i915) (INTEL_INFO(i915)->media.ver) -#define MEDIA_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->media.ver, \ - INTEL_INFO(i915)->media.rel) +#define MEDIA_VER(i915) (RUNTIME_INFO(i915)->media.ip.ver) +#define MEDIA_VER_FULL(i915) IP_VER(RUNTIME_INFO(i915)->media.ip.ver, \ + RUNTIME_INFO(i915)->media.ip.rel) #define IS_MEDIA_VER(i915, from, until) \ (MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until)) -#define DISPLAY_VER(i915) (INTEL_INFO(i915)->display.ver) +#define DISPLAY_VER(i915) (RUNTIME_INFO(i915)->display.ip.ver) #define IS_DISPLAY_VER(i915, from, until) \ (DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until)) @@ -1210,7 +823,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv) -#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type) +#define INTEL_PPGTT(dev_priv) (RUNTIME_INFO(dev_priv)->ppgtt_type) #define HAS_PPGTT(dev_priv) \ (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE) #define HAS_FULL_PPGTT(dev_priv) \ @@ -1218,7 +831,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \ GEM_BUG_ON((sizes) == 0); \ - ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \ + ((sizes) & ~RUNTIME_INFO(dev_priv)->page_sizes) == 0; \ }) #define HAS_OVERLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_overlay) @@ -1249,13 +862,15 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug) #define HAS_FW_BLC(dev_priv) (DISPLAY_VER(dev_priv) > 2) -#define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.fbc_mask != 0) +#define HAS_FBC(dev_priv) (RUNTIME_INFO(dev_priv)->fbc_mask != 0) #define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) >= 7) #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) #define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst) -#define HAS_DP20(dev_priv) (IS_DG2(dev_priv)) +#define HAS_DP20(dev_priv) (IS_DG2(dev_priv) || DISPLAY_VER(dev_priv) >= 14) + +#define HAS_DOUBLE_BUFFERED_M_N(dev_priv) (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) #define 
HAS_CDCLK_CRAWL(dev_priv) (INTEL_INFO(dev_priv)->display.has_cdclk_crawl) #define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi) @@ -1264,7 +879,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_PSR_HW_TRACKING(dev_priv) \ (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking) #define HAS_PSR2_SEL_FETCH(dev_priv) (DISPLAY_VER(dev_priv) >= 12) -#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->display.cpu_transcoder_mask & BIT(trans)) != 0) +#define HAS_TRANSCODER(dev_priv, trans) ((RUNTIME_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0) #define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6) #define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p) @@ -1272,7 +887,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps) -#define HAS_DMC(dev_priv) (INTEL_INFO(dev_priv)->display.has_dmc) +#define HAS_DMC(dev_priv) (RUNTIME_INFO(dev_priv)->has_dmc) #define HAS_HECI_PXP(dev_priv) \ (INTEL_INFO(dev_priv)->has_heci_pxp) @@ -1302,9 +917,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc) -#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i)) +#define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i)) #define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM) +#define HAS_EXTRA_GT_LIST(dev_priv) (INTEL_INFO(dev_priv)->extra_gt_list) + /* * Platform has the dedicated compression control state for each lmem surfaces * stored in lmem to support the 3D and media compression formats. @@ -1313,7 +930,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc) -#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu) +#define HAS_POOLED_EU(dev_priv) (RUNTIME_INFO(dev_priv)->has_pooled_eu) #define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs) @@ -1335,9 +952,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define GT_FREQUENCY_MULTIPLIER 50 #define GEN9_FREQ_SCALER 3 -#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->display.pipe_mask)) +#define INTEL_NUM_PIPES(dev_priv) (hweight8(RUNTIME_INFO(dev_priv)->pipe_mask)) -#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.pipe_mask != 0) +#define HAS_DISPLAY(dev_priv) (RUNTIME_INFO(dev_priv)->pipe_mask != 0) #define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11) @@ -1355,85 +972,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \ IS_ALDERLAKE_S(dev_priv)) -#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915)) +#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) #define HAS_3D_PIPELINE(i915) (INTEL_INFO(i915)->has_3d_pipeline) #define HAS_ONE_EU_PER_FUSE_BIT(i915) (INTEL_INFO(i915)->has_one_eu_per_fuse_bit) -/* i915_gem.c */ -void i915_gem_init_early(struct drm_i915_private *dev_priv); -void i915_gem_cleanup_early(struct drm_i915_private *dev_priv); - -static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) -{ - /* - * A single pass should suffice to release all the freed objects (along - * most call paths) , but be a little more paranoid in that freeing - * the objects does take a little amount of time, during which the rcu - * callbacks could have added new objects into the freed list, and - * armed the work again. 
- */ - while (atomic_read(&i915->mm.free_count)) { - flush_work(&i915->mm.free_work); - flush_delayed_work(&i915->bdev.wq); - rcu_barrier(); - } -} - -static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915) -{ - /* - * Similar to objects above (see i915_gem_drain_freed-objects), in - * general we have workers that are armed by RCU and then rearm - * themselves in their callbacks. To be paranoid, we need to - * drain the workqueue a second time after waiting for the RCU - * grace period so that we catch work queued via RCU from the first - * pass. As neither drain_workqueue() nor flush_workqueue() report - * a result, we make an assumption that we only don't require more - * than 3 passes to catch all _recursive_ RCU delayed work. - * - */ - int pass = 3; - do { - flush_workqueue(i915->wq); - rcu_barrier(); - i915_gem_drain_freed_objects(i915); - } while (--pass); - drain_workqueue(i915->wq); -} - -struct i915_vma * __must_check -i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj, - struct i915_gem_ww_ctx *ww, - const struct i915_gtt_view *view, - u64 size, u64 alignment, u64 flags); - -struct i915_vma * __must_check -i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, - const struct i915_gtt_view *view, - u64 size, u64 alignment, u64 flags); - -int i915_gem_object_unbind(struct drm_i915_gem_object *obj, - unsigned long flags); -#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0) -#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1) -#define I915_GEM_OBJECT_UNBIND_TEST BIT(2) -#define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3) -#define I915_GEM_OBJECT_UNBIND_ASYNC BIT(4) - -void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); - -int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno); - -int __must_check i915_gem_init(struct drm_i915_private *dev_priv); -void i915_gem_driver_register(struct drm_i915_private *i915); -void i915_gem_driver_unregister(struct drm_i915_private *i915); -void i915_gem_driver_remove(struct drm_i915_private *dev_priv); -void i915_gem_driver_release(struct drm_i915_private *dev_priv); - -int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file); - /* intel_device_info.c */ static inline struct intel_device_info * mkwrite_device_info(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a3373699835d..f18cc6270b2b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -842,6 +842,10 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915) &to_gt(i915)->ggtt->userfault_list, userfault_link) __i915_gem_object_release_mmap_gtt(obj); + list_for_each_entry_safe(obj, on, + &to_gt(i915)->lmem_userfault_list, userfault_link) + i915_gem_object_runtime_pm_release_mmap_offset(obj); + /* * The fence will be lost when the device powers down. If any were * in use by hardware (i.e. 
they are pinned), we should not be powering @@ -1035,7 +1039,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, if (i915_gem_object_has_pages(obj) && i915_gem_object_is_tiled(obj) && - i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { + i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) { if (obj->mm.madv == I915_MADV_WILLNEED) { GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj)); i915_gem_object_clear_tiling_quirk(obj); @@ -1085,14 +1089,50 @@ out: return err; } +/* + * A single pass should suffice to release all the freed objects (along most + * call paths), but be a little more paranoid in that freeing the objects does + * take a small amount of time, during which the RCU callbacks could have added + * new objects into the freed list, and armed the work again. + */ +void i915_gem_drain_freed_objects(struct drm_i915_private *i915) +{ + while (atomic_read(&i915->mm.free_count)) { + flush_work(&i915->mm.free_work); + flush_delayed_work(&i915->bdev.wq); + rcu_barrier(); + } +} + +/* + * Similar to objects above (see i915_gem_drain_freed_objects()), in general we + * have workers that are armed by RCU and then rearm themselves in their + * callbacks. To be paranoid, we need to drain the workqueue a second time after + * waiting for the RCU grace period so that we catch work queued via RCU from + * the first pass. As neither drain_workqueue() nor flush_workqueue() report a + * result, we assume that no more than 3 passes are required + * to catch all _recursive_ RCU delayed work. + */ +void i915_gem_drain_workqueue(struct drm_i915_private *i915) +{ + int i; + + for (i = 0; i < 3; i++) { + flush_workqueue(i915->wq); + rcu_barrier(); + i915_gem_drain_freed_objects(i915); + } + + drain_workqueue(i915->wq); +} + int i915_gem_init(struct drm_i915_private *dev_priv) { int ret; /* We need to fall back to 4K pages if host doesn't support huge gtt.
*/ if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv)) - mkwrite_device_info(dev_priv)->page_sizes = - I915_GTT_PAGE_SIZE_4K; + RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K; ret = i915_gem_init_userptr(dev_priv); if (ret) @@ -1173,7 +1213,7 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915) void i915_gem_driver_remove(struct drm_i915_private *dev_priv) { - intel_wakeref_auto_fini(&to_gt(dev_priv)->ggtt->userfault_wakeref); + intel_wakeref_auto_fini(&to_gt(dev_priv)->userfault_wakeref); i915_gem_suspend_late(dev_priv); intel_gt_driver_remove(to_gt(dev_priv)); @@ -1213,7 +1253,7 @@ void i915_gem_init_early(struct drm_i915_private *dev_priv) i915_gem_init__mm(dev_priv); i915_gem_init__contexts(dev_priv); - spin_lock_init(&dev_priv->fb_tracking.lock); + spin_lock_init(&dev_priv->display.fb_tracking.lock); } void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index 68d8d52bd541..a5cdf6662d01 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -26,12 +26,55 @@ #define __I915_GEM_H__ #include <linux/bug.h> +#include <linux/types.h> #include <drm/drm_drv.h> #include "i915_utils.h" +struct drm_file; +struct drm_i915_gem_object; struct drm_i915_private; +struct i915_gem_ww_ctx; +struct i915_gtt_view; +struct i915_vma; + +void i915_gem_init_early(struct drm_i915_private *i915); +void i915_gem_cleanup_early(struct drm_i915_private *i915); + +void i915_gem_drain_freed_objects(struct drm_i915_private *i915); +void i915_gem_drain_workqueue(struct drm_i915_private *i915); + +struct i915_vma * __must_check +i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj, + struct i915_gem_ww_ctx *ww, + const struct i915_gtt_view *view, + u64 size, u64 alignment, u64 flags); + +struct i915_vma * __must_check +i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, + const struct i915_gtt_view *view, + u64 size, u64 alignment, u64 flags); + +int i915_gem_object_unbind(struct drm_i915_gem_object *obj, + unsigned long flags); +#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0) +#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1) +#define I915_GEM_OBJECT_UNBIND_TEST BIT(2) +#define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3) +#define I915_GEM_OBJECT_UNBIND_ASYNC BIT(4) + +void i915_gem_runtime_suspend(struct drm_i915_private *i915); + +int __must_check i915_gem_init(struct drm_i915_private *i915); +void i915_gem_driver_register(struct drm_i915_private *i915); +void i915_gem_driver_unregister(struct drm_i915_private *i915); +void i915_gem_driver_remove(struct drm_i915_private *i915); +void i915_gem_driver_release(struct drm_i915_private *i915); + +int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file); + +/* FIXME: All of the below belong somewhere else. 
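(The GEM entry points declared above were moved into this header from i915_drv.h as part of this series' header split; the debug helpers guarded below are the part still awaiting a proper home.)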
*/ #ifdef CONFIG_DRM_I915_DEBUG_GEM diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c index 6fd15b39570c..342c8ca6414e 100644 --- a/drivers/gpu/drm/i915/i915_getparam.c +++ b/drivers/gpu/drm/i915/i915_getparam.c @@ -36,7 +36,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, value = to_gt(i915)->ggtt->num_fences; break; case I915_PARAM_HAS_OVERLAY: - value = !!i915->overlay; + value = !!i915->display.overlay; break; case I915_PARAM_HAS_BSD: value = !!intel_engine_lookup_user(i915, diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 16d8b7ba65dc..9ea2fe34e7d3 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -646,8 +646,7 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m, { struct drm_printer p = i915_error_printer(m); - intel_device_info_print_static(&error->device_info, &p); - intel_device_info_print_runtime(&error->runtime_info, &p); + intel_device_info_print(&error->device_info, &error->runtime_info, &p); intel_driver_caps_print(&error->driver_caps, &p); } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 783a6ca41a61..86a42d9e8041 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -185,7 +185,7 @@ static const u32 hpd_sde_dg1[HPD_NUM_PINS] = { static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) { - struct i915_hotplug *hpd = &dev_priv->hotplug; + struct intel_hotplug *hpd = &dev_priv->display.hotplug; if (HAS_GMCH(dev_priv)) { if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || @@ -595,7 +595,7 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv, static bool i915_has_asle(struct drm_i915_private *dev_priv) { - if (!dev_priv->opregion.asle) + if (!dev_priv->display.opregion.asle) return false; return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); @@ -1104,9 +1104,9 @@ static void ivb_parity_work(struct work_struct *work) out: drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice); - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv)); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); mutex_unlock(&dev_priv->drm.struct_mutex); } @@ -1272,7 +1272,7 @@ static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, u32 enabled_irqs = 0; for_each_intel_encoder(&dev_priv->drm, encoder) - if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) + if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) enabled_irqs |= hpd[encoder->hpd_pin]; return enabled_irqs; @@ -1304,12 +1304,12 @@ static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915, static void gmbus_irq_handler(struct drm_i915_private *dev_priv) { - wake_up_all(&dev_priv->gmbus_wait_queue); + wake_up_all(&dev_priv->display.gmbus.wait_queue); } static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) { - wake_up_all(&dev_priv->gmbus_wait_queue); + wake_up_all(&dev_priv->display.gmbus.wait_queue); } #if defined(CONFIG_DEBUG_FS) @@ -1637,7 +1637,7 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, if (hotplug_trigger) { intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, hotplug_trigger, - dev_priv->hotplug.hpd, + dev_priv->display.hotplug.hpd, i9xx_port_hotplug_long_detect); intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); @@ -1841,7 +1841,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private
*dev_priv, intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, - dev_priv->hotplug.pch_hpd, + dev_priv->display.hotplug.pch_hpd, pch_port_hotplug_long_detect); intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); @@ -1986,7 +1986,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, ddi_hotplug_trigger, dig_hotplug_reg, - dev_priv->hotplug.pch_hpd, + dev_priv->display.hotplug.pch_hpd, icp_ddi_port_hotplug_long_detect); } @@ -1998,7 +1998,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, tc_hotplug_trigger, dig_hotplug_reg, - dev_priv->hotplug.pch_hpd, + dev_priv->display.hotplug.pch_hpd, icp_tc_port_hotplug_long_detect); } @@ -2024,7 +2024,7 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, - dev_priv->hotplug.pch_hpd, + dev_priv->display.hotplug.pch_hpd, spt_port_hotplug_long_detect); } @@ -2036,7 +2036,7 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug2_trigger, dig_hotplug_reg, - dev_priv->hotplug.pch_hpd, + dev_priv->display.hotplug.pch_hpd, spt_port_hotplug2_long_detect); } @@ -2057,7 +2057,7 @@ static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, - dev_priv->hotplug.hpd, + dev_priv->display.hotplug.hpd, ilk_port_hotplug_long_detect); intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); @@ -2237,7 +2237,7 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, - dev_priv->hotplug.hpd, + dev_priv->display.hotplug.hpd, bxt_port_hotplug_long_detect); intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); @@ -2257,7 +2257,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc, dig_hotplug_reg, - dev_priv->hotplug.hpd, + dev_priv->display.hotplug.hpd, gen11_port_hotplug_long_detect); } @@ -2269,7 +2269,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt, dig_hotplug_reg, - dev_priv->hotplug.hpd, + dev_priv->display.hotplug.hpd, gen11_port_hotplug_long_detect); } @@ -2653,9 +2653,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) } static u32 -gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl) +gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl) { - void __iomem * const regs = gt->uncore->regs; + void __iomem * const regs = i915->uncore.regs; u32 iir; if (!(master_ctl & GEN11_GU_MISC_IRQ)) @@ -2669,10 +2669,10 @@ gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl) } static void -gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir) +gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir) { if (iir & GEN11_GU_MISC_GSE) - intel_opregion_asle_intr(gt->i915); + intel_opregion_asle_intr(i915); } static inline u32 gen11_master_intr_disable(void __iomem * const regs) @@ -2736,11 +2736,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) if (master_ctl & GEN11_DISPLAY_IRQ) gen11_display_irq_handler(i915); - gu_misc_iir = 
gen11_gu_misc_irq_ack(gt, master_ctl); + gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); gen11_master_intr_enable(regs); - gen11_gu_misc_irq_handler(gt, gu_misc_iir); + gen11_gu_misc_irq_handler(i915, gu_misc_iir); pmu_irq_stats(i915, IRQ_HANDLED); @@ -2801,11 +2801,11 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) if (master_ctl & GEN11_DISPLAY_IRQ) gen11_display_irq_handler(i915); - gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl); + gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); dg1_master_intr_enable(regs); - gen11_gu_misc_irq_handler(gt, gu_misc_iir); + gen11_gu_misc_irq_handler(i915, gu_misc_iir); pmu_irq_stats(i915, IRQ_HANDLED); @@ -3313,8 +3313,8 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); @@ -3383,8 +3383,8 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP) intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); @@ -3460,8 +3460,8 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) u32 hotplug_irqs, enabled_irqs; u32 val; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR); val &= ~hotplug_irqs; @@ -3538,8 +3538,8 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); @@ -3578,8 +3578,8 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); if (DISPLAY_VER(dev_priv) >= 8) bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); @@ -3636,8 +3636,8 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; - enabled_irqs = 
intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); @@ -4370,8 +4370,8 @@ HPD_FUNCS(ilk); void intel_hpd_irq_setup(struct drm_i915_private *i915) { - if (i915->display_irqs_enabled && i915->hotplug_funcs) - i915->hotplug_funcs->hpd_irq_setup(i915); + if (i915->display_irqs_enabled && i915->display.funcs.hotplug) + i915->display.funcs.hotplug->hpd_irq_setup(i915); } /** @@ -4413,33 +4413,33 @@ void intel_irq_init(struct drm_i915_private *dev_priv) if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) dev_priv->display_irqs_enabled = false; - dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; + dev_priv->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; /* If we have MST support, we want to avoid doing short HPD IRQ storm * detection, as short HPD storms will occur as a natural part of * sideband messaging with MST. * On older platforms however, IRQ storms can occur with both long and * short pulses, as seen on some G4x systems. */ - dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv); + dev_priv->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv); if (HAS_GMCH(dev_priv)) { if (I915_HAS_HOTPLUG(dev_priv)) - dev_priv->hotplug_funcs = &i915_hpd_funcs; + dev_priv->display.funcs.hotplug = &i915_hpd_funcs; } else { if (HAS_PCH_DG2(dev_priv)) - dev_priv->hotplug_funcs = &icp_hpd_funcs; + dev_priv->display.funcs.hotplug = &icp_hpd_funcs; else if (HAS_PCH_DG1(dev_priv)) - dev_priv->hotplug_funcs = &dg1_hpd_funcs; + dev_priv->display.funcs.hotplug = &dg1_hpd_funcs; else if (DISPLAY_VER(dev_priv) >= 11) - dev_priv->hotplug_funcs = &gen11_hpd_funcs; + dev_priv->display.funcs.hotplug = &gen11_hpd_funcs; else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - dev_priv->hotplug_funcs = &bxt_hpd_funcs; + dev_priv->display.funcs.hotplug = &bxt_hpd_funcs; else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - dev_priv->hotplug_funcs = &icp_hpd_funcs; + dev_priv->display.funcs.hotplug = &icp_hpd_funcs; else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) - dev_priv->hotplug_funcs = &spt_hpd_funcs; + dev_priv->display.funcs.hotplug = &spt_hpd_funcs; else - dev_priv->hotplug_funcs = &ilk_hpd_funcs; + dev_priv->display.funcs.hotplug = &ilk_hpd_funcs; } } diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 59a579ed03bb..77e7df21f539 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -26,16 +26,20 @@ #include <drm/drm_drv.h> #include <drm/i915_pciids.h> +#include "gt/intel_gt_regs.h" +#include "gt/intel_sa_media.h" + #include "i915_driver.h" #include "i915_drv.h" #include "i915_pci.h" #include "i915_reg.h" +#include "intel_pci_config.h" #define PLATFORM(x) .platform = (x) #define GEN(x) \ - .graphics.ver = (x), \ - .media.ver = (x), \ - .display.ver = (x) + .__runtime.graphics.ip.ver = (x), \ + .__runtime.media.ip.ver = (x), \ + .__runtime.display.ip.ver = (x) #define I845_PIPE_OFFSETS \ .display.pipe_offsets = { \ @@ -159,16 +163,16 @@ /* Keep in gen based order, and chronological order within a gen */ #define GEN_DEFAULT_PAGE_SIZES \ - .page_sizes = I915_GTT_PAGE_SIZE_4K + .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K #define GEN_DEFAULT_REGIONS \ - .memory_regions = REGION_SMEM | 
REGION_STOLEN_SMEM + .__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_SMEM #define I830_FEATURES \ GEN(2), \ .is_mobile = 1, \ - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_overlay = 1, \ .display.cursor_needs_physical = 1, \ .display.overlay_needs_physical = 1, \ @@ -177,7 +181,7 @@ .has_3d_pipeline = 1, \ .hws_needs_physical = 1, \ .unfenced_needs_alignment = 1, \ - .platform_engine_mask = BIT(RCS0), \ + .__runtime.platform_engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = false, \ .dma_mask_size = 32, \ @@ -189,8 +193,8 @@ #define I845_FEATURES \ GEN(2), \ - .display.pipe_mask = BIT(PIPE_A), \ - .display.cpu_transcoder_mask = BIT(TRANSCODER_A), \ + .__runtime.pipe_mask = BIT(PIPE_A), \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A), \ .display.has_overlay = 1, \ .display.overlay_needs_physical = 1, \ .display.has_gmch = 1, \ @@ -198,7 +202,7 @@ .gpu_reset_clobbers_display = true, \ .hws_needs_physical = 1, \ .unfenced_needs_alignment = 1, \ - .platform_engine_mask = BIT(RCS0), \ + .__runtime.platform_engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = false, \ .dma_mask_size = 32, \ @@ -221,22 +225,22 @@ static const struct intel_device_info i845g_info = { static const struct intel_device_info i85x_info = { I830_FEATURES, PLATFORM(INTEL_I85X), - .display.fbc_mask = BIT(INTEL_FBC_A), + .__runtime.fbc_mask = BIT(INTEL_FBC_A), }; static const struct intel_device_info i865g_info = { I845_FEATURES, PLATFORM(INTEL_I865G), - .display.fbc_mask = BIT(INTEL_FBC_A), + .__runtime.fbc_mask = BIT(INTEL_FBC_A), }; #define GEN3_FEATURES \ GEN(3), \ - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ - .platform_engine_mask = BIT(RCS0), \ + .__runtime.platform_engine_mask = BIT(RCS0), \ .has_3d_pipeline = 1, \ .has_snoop = true, \ .has_coherent_ggtt = true, \ @@ -266,7 +270,7 @@ static const struct intel_device_info i915gm_info = { .display.has_overlay = 1, .display.overlay_needs_physical = 1, .display.supports_tv = 1, - .display.fbc_mask = BIT(INTEL_FBC_A), + .__runtime.fbc_mask = BIT(INTEL_FBC_A), .hws_needs_physical = 1, .unfenced_needs_alignment = 1, }; @@ -291,7 +295,7 @@ static const struct intel_device_info i945gm_info = { .display.has_overlay = 1, .display.overlay_needs_physical = 1, .display.supports_tv = 1, - .display.fbc_mask = BIT(INTEL_FBC_A), + .__runtime.fbc_mask = BIT(INTEL_FBC_A), .hws_needs_physical = 1, .unfenced_needs_alignment = 1, }; @@ -323,12 +327,12 @@ static const struct intel_device_info pnv_m_info = { #define GEN4_FEATURES \ GEN(4), \ - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ - .platform_engine_mask = BIT(RCS0), \ + .__runtime.platform_engine_mask = BIT(RCS0), \ .has_3d_pipeline = 1, \ .has_snoop = true, \ .has_coherent_ggtt = true, \ @@ -351,7 
+355,7 @@ static const struct intel_device_info i965gm_info = { GEN4_FEATURES, PLATFORM(INTEL_I965GM), .is_mobile = 1, - .display.fbc_mask = BIT(INTEL_FBC_A), + .__runtime.fbc_mask = BIT(INTEL_FBC_A), .display.has_overlay = 1, .display.supports_tv = 1, .hws_needs_physical = 1, @@ -361,7 +365,7 @@ static const struct intel_device_info i965gm_info = { static const struct intel_device_info g45_info = { GEN4_FEATURES, PLATFORM(INTEL_G45), - .platform_engine_mask = BIT(RCS0) | BIT(VCS0), + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0), .gpu_reset_clobbers_display = false, }; @@ -369,18 +373,18 @@ static const struct intel_device_info gm45_info = { GEN4_FEATURES, PLATFORM(INTEL_GM45), .is_mobile = 1, - .display.fbc_mask = BIT(INTEL_FBC_A), + .__runtime.fbc_mask = BIT(INTEL_FBC_A), .display.supports_tv = 1, - .platform_engine_mask = BIT(RCS0) | BIT(VCS0), + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0), .gpu_reset_clobbers_display = false, }; #define GEN5_FEATURES \ GEN(5), \ - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ - .platform_engine_mask = BIT(RCS0) | BIT(VCS0), \ + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0), \ .has_3d_pipeline = 1, \ .has_snoop = true, \ .has_coherent_ggtt = true, \ @@ -403,16 +407,16 @@ static const struct intel_device_info ilk_m_info = { PLATFORM(INTEL_IRONLAKE), .is_mobile = 1, .has_rps = true, - .display.fbc_mask = BIT(INTEL_FBC_A), + .__runtime.fbc_mask = BIT(INTEL_FBC_A), }; #define GEN6_FEATURES \ GEN(6), \ - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ - .display.fbc_mask = BIT(INTEL_FBC_A), \ - .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ + .__runtime.fbc_mask = BIT(INTEL_FBC_A), \ + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ .has_3d_pipeline = 1, \ .has_coherent_ggtt = true, \ .has_llc = 1, \ @@ -420,8 +424,8 @@ static const struct intel_device_info ilk_m_info = { .has_rc6p = 1, \ .has_rps = true, \ .dma_mask_size = 40, \ - .ppgtt_type = INTEL_PPGTT_ALIASING, \ - .ppgtt_size = 31, \ + .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \ + .__runtime.ppgtt_size = 31, \ I9XX_PIPE_OFFSETS, \ I9XX_CURSOR_OFFSETS, \ ILK_COLORS, \ @@ -460,11 +464,11 @@ static const struct intel_device_info snb_m_gt2_info = { #define GEN7_FEATURES \ GEN(7), \ - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \ + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \ .display.has_hotplug = 1, \ - .display.fbc_mask = BIT(INTEL_FBC_A), \ - .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ + .__runtime.fbc_mask = BIT(INTEL_FBC_A), \ + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ .has_3d_pipeline = 1, \ .has_coherent_ggtt = true, \ .has_llc = 1, \ @@ -473,8 +477,8 @@ static const struct intel_device_info snb_m_gt2_info = { .has_reset_engine = true, \ .has_rps = true, \ .dma_mask_size = 40, \ - .ppgtt_type = 
INTEL_PPGTT_ALIASING, \ - .ppgtt_size = 31, \ + .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \ + .__runtime.ppgtt_size = 31, \ IVB_PIPE_OFFSETS, \ IVB_CURSOR_OFFSETS, \ IVB_COLORS, \ @@ -516,8 +520,8 @@ static const struct intel_device_info ivb_q_info = { GEN7_FEATURES, PLATFORM(INTEL_IVYBRIDGE), .gt = 2, - .display.pipe_mask = 0, /* legal, last one wins */ - .display.cpu_transcoder_mask = 0, + .__runtime.pipe_mask = 0, /* legal, last one wins */ + .__runtime.cpu_transcoder_mask = 0, .has_l3_dpf = 1, }; @@ -525,8 +529,8 @@ static const struct intel_device_info vlv_info = { PLATFORM(INTEL_VALLEYVIEW), GEN(7), .is_lp = 1, - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), .has_runtime_pm = 1, .has_rc6 = 1, .has_reset_engine = true, @@ -534,11 +538,11 @@ static const struct intel_device_info vlv_info = { .display.has_gmch = 1, .display.has_hotplug = 1, .dma_mask_size = 40, - .ppgtt_type = INTEL_PPGTT_ALIASING, - .ppgtt_size = 31, + .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, + .__runtime.ppgtt_size = 31, .has_snoop = true, .has_coherent_ggtt = false, - .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), .display.mmio_offset = VLV_DISPLAY_BASE, I9XX_PIPE_OFFSETS, I9XX_CURSOR_OFFSETS, @@ -549,8 +553,8 @@ static const struct intel_device_info vlv_info = { #define G75_FEATURES \ GEN7_FEATURES, \ - .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \ .display.has_ddi = 1, \ .display.has_fpga_dbg = 1, \ @@ -584,8 +588,8 @@ static const struct intel_device_info hsw_gt3_info = { GEN(8), \ .has_logical_ring_contexts = 1, \ .dma_mask_size = 39, \ - .ppgtt_type = INTEL_PPGTT_FULL, \ - .ppgtt_size = 48, \ + .__runtime.ppgtt_type = INTEL_PPGTT_FULL, \ + .__runtime.ppgtt_size = 48, \ .has_64bit_reloc = 1 #define BDW_PLATFORM \ @@ -613,18 +617,18 @@ static const struct intel_device_info bdw_rsvd_info = { static const struct intel_device_info bdw_gt3_info = { BDW_PLATFORM, .gt = 3, - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; static const struct intel_device_info chv_info = { PLATFORM(INTEL_CHERRYVIEW), GEN(8), - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), .display.has_hotplug = 1, .is_lp = 1, - .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), .has_64bit_reloc = 1, .has_runtime_pm = 1, .has_rc6 = 1, @@ -632,8 +636,8 @@ static const struct intel_device_info chv_info = { .has_logical_ring_contexts = 1, .display.has_gmch = 1, .dma_mask_size = 39, - .ppgtt_type = INTEL_PPGTT_FULL, - .ppgtt_size = 32, + .__runtime.ppgtt_type = INTEL_PPGTT_FULL, + .__runtime.ppgtt_size = 32, .has_reset_engine = 1, 
.has_snoop = true, .has_coherent_ggtt = false, @@ -646,16 +650,16 @@ static const struct intel_device_info chv_info = { }; #define GEN9_DEFAULT_PAGE_SIZES \ - .page_sizes = I915_GTT_PAGE_SIZE_4K | \ - I915_GTT_PAGE_SIZE_64K + .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K | \ + I915_GTT_PAGE_SIZE_64K #define GEN9_FEATURES \ GEN8_FEATURES, \ GEN(9), \ GEN9_DEFAULT_PAGE_SIZES, \ - .display.has_dmc = 1, \ + .__runtime.has_dmc = 1, \ .has_gt_uc = 1, \ - .display.has_hdcp = 1, \ + .__runtime.has_hdcp = 1, \ .display.has_ipc = 1, \ .display.has_psr = 1, \ .display.has_psr_hw_tracking = 1, \ @@ -678,7 +682,7 @@ static const struct intel_device_info skl_gt2_info = { #define SKL_GT3_PLUS_PLATFORM \ SKL_PLATFORM, \ - .platform_engine_mask = \ + .__runtime.platform_engine_mask = \ BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1) @@ -697,29 +701,29 @@ static const struct intel_device_info skl_gt4_info = { .is_lp = 1, \ .display.dbuf.slice_mask = BIT(DBUF_S1), \ .display.has_hotplug = 1, \ - .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \ .has_3d_pipeline = 1, \ .has_64bit_reloc = 1, \ .display.has_ddi = 1, \ .display.has_fpga_dbg = 1, \ - .display.fbc_mask = BIT(INTEL_FBC_A), \ - .display.has_hdcp = 1, \ + .__runtime.fbc_mask = BIT(INTEL_FBC_A), \ + .__runtime.has_hdcp = 1, \ .display.has_psr = 1, \ .display.has_psr_hw_tracking = 1, \ .has_runtime_pm = 1, \ - .display.has_dmc = 1, \ + .__runtime.has_dmc = 1, \ .has_rc6 = 1, \ .has_rps = true, \ .display.has_dp_mst = 1, \ .has_logical_ring_contexts = 1, \ .has_gt_uc = 1, \ .dma_mask_size = 39, \ - .ppgtt_type = INTEL_PPGTT_FULL, \ - .ppgtt_size = 48, \ + .__runtime.ppgtt_type = INTEL_PPGTT_FULL, \ + .__runtime.ppgtt_size = 48, \ .has_reset_engine = 1, \ .has_snoop = true, \ .has_coherent_ggtt = false, \ @@ -739,7 +743,7 @@ static const struct intel_device_info bxt_info = { static const struct intel_device_info glk_info = { GEN9_LP_FEATURES, PLATFORM(INTEL_GEMINILAKE), - .display.ver = 10, + .__runtime.display.ip.ver = 10, .display.dbuf.size = 1024 - 4, /* 4 blocks for bypass path allocation */ GLK_COLORS, }; @@ -761,7 +765,7 @@ static const struct intel_device_info kbl_gt2_info = { static const struct intel_device_info kbl_gt3_info = { KBL_PLATFORM, .gt = 3, - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; @@ -782,7 +786,7 @@ static const struct intel_device_info cfl_gt2_info = { static const struct intel_device_info cfl_gt3_info = { CFL_PLATFORM, .gt = 3, - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; @@ -801,15 +805,15 @@ static const struct intel_device_info cml_gt2_info = { }; #define GEN11_DEFAULT_PAGE_SIZES \ - .page_sizes = I915_GTT_PAGE_SIZE_4K | \ - I915_GTT_PAGE_SIZE_64K | \ - I915_GTT_PAGE_SIZE_2M + .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K | \ + I915_GTT_PAGE_SIZE_64K | \ + I915_GTT_PAGE_SIZE_2M #define GEN11_FEATURES \ GEN9_FEATURES, \ GEN11_DEFAULT_PAGE_SIZES, \ .display.abox_mask = BIT(0), \ 
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \ .display.pipe_offsets = { \ @@ -832,37 +836,37 @@ static const struct intel_device_info cml_gt2_info = { ICL_COLORS, \ .display.dbuf.size = 2048, \ .display.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \ - .display.has_dsc = 1, \ + .__runtime.has_dsc = 1, \ .has_coherent_ggtt = false, \ .has_logical_ring_elsq = 1 static const struct intel_device_info icl_info = { GEN11_FEATURES, PLATFORM(INTEL_ICELAKE), - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), }; static const struct intel_device_info ehl_info = { GEN11_FEATURES, PLATFORM(INTEL_ELKHARTLAKE), - .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), - .ppgtt_size = 36, + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), + .__runtime.ppgtt_size = 36, }; static const struct intel_device_info jsl_info = { GEN11_FEATURES, PLATFORM(INTEL_JASPERLAKE), - .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), - .ppgtt_size = 36, + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), + .__runtime.ppgtt_size = 36, }; #define GEN12_FEATURES \ GEN11_FEATURES, \ GEN(12), \ .display.abox_mask = GENMASK(2, 1), \ - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \ .display.pipe_offsets = { \ @@ -890,7 +894,7 @@ static const struct intel_device_info tgl_info = { GEN12_FEATURES, PLATFORM(INTEL_TIGERLAKE), .display.has_modular_fia = 1, - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), }; @@ -898,17 +902,17 @@ static const struct intel_device_info rkl_info = { GEN12_FEATURES, PLATFORM(INTEL_ROCKETLAKE), .display.abox_mask = BIT(0), - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), .display.has_hti = 1, .display.has_psr_hw_tracking = 0, - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0), }; #define DGFX_FEATURES \ - .memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \ + .__runtime.memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \ .has_llc = 0, \ .has_pxp = 0, \ .has_snoop = 1, \ @@ -918,24 +922,24 @@ static const struct intel_device_info rkl_info = { static const struct intel_device_info dg1_info = { GEN12_FEATURES, DGFX_FEATURES, - .graphics.rel = 10, + .__runtime.graphics.ip.rel = 10, PLATFORM(INTEL_DG1), - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), .require_force_probe = 1, - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | 
BIT(VCS2), /* Wa_16011227922 */ - .ppgtt_size = 47, + .__runtime.ppgtt_size = 47, }; static const struct intel_device_info adl_s_info = { GEN12_FEATURES, PLATFORM(INTEL_ALDERLAKE_S), - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), .display.has_hti = 1, .display.has_psr_hw_tracking = 0, - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), .dma_mask_size = 39, }; @@ -951,18 +955,18 @@ static const struct intel_device_info adl_s_info = { .display.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \ BIT(DBUF_S4), \ .display.has_ddi = 1, \ - .display.has_dmc = 1, \ + .__runtime.has_dmc = 1, \ .display.has_dp_mst = 1, \ .display.has_dsb = 1, \ - .display.has_dsc = 1, \ - .display.fbc_mask = BIT(INTEL_FBC_A), \ + .__runtime.has_dsc = 1, \ + .__runtime.fbc_mask = BIT(INTEL_FBC_A), \ .display.has_fpga_dbg = 1, \ - .display.has_hdcp = 1, \ + .__runtime.has_hdcp = 1, \ .display.has_hotplug = 1, \ .display.has_ipc = 1, \ .display.has_psr = 1, \ - .display.ver = 13, \ - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ + .__runtime.display.ip.ver = 13, \ + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ .display.pipe_offsets = { \ [TRANSCODER_A] = PIPE_A_OFFSET, \ [TRANSCODER_B] = PIPE_B_OFFSET, \ @@ -985,28 +989,28 @@ static const struct intel_device_info adl_p_info = { GEN12_FEATURES, XE_LPD_FEATURES, PLATFORM(INTEL_ALDERLAKE_P), - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), .display.has_cdclk_crawl = 1, .display.has_modular_fia = 1, .display.has_psr_hw_tracking = 0, - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), - .ppgtt_size = 48, + .__runtime.ppgtt_size = 48, .dma_mask_size = 39, }; #undef GEN #define XE_HP_PAGE_SIZES \ - .page_sizes = I915_GTT_PAGE_SIZE_4K | \ - I915_GTT_PAGE_SIZE_64K | \ - I915_GTT_PAGE_SIZE_2M + .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K | \ + I915_GTT_PAGE_SIZE_64K | \ + I915_GTT_PAGE_SIZE_2M #define XE_HP_FEATURES \ - .graphics.ver = 12, \ - .graphics.rel = 50, \ + .__runtime.graphics.ip.ver = 12, \ + .__runtime.graphics.ip.rel = 50, \ XE_HP_PAGE_SIZES, \ .dma_mask_size = 46, \ .has_3d_pipeline = 1, \ @@ -1022,12 +1026,12 @@ static const struct intel_device_info adl_p_info = { .has_reset_engine = 1, \ .has_rps = 1, \ .has_runtime_pm = 1, \ - .ppgtt_size = 48, \ - .ppgtt_type = INTEL_PPGTT_FULL + .__runtime.ppgtt_size = 48, \ + .__runtime.ppgtt_type = INTEL_PPGTT_FULL #define XE_HPM_FEATURES \ - .media.ver = 12, \ - .media.rel = 50 + .__runtime.media.ip.ver = 12, \ + .__runtime.media.ip.rel = 50 __maybe_unused static const struct intel_device_info xehpsdv_info = { @@ -1039,7 +1043,7 @@ static const struct intel_device_info xehpsdv_info = { .has_64k_pages = 1, .needs_compact_pt = 1, .has_media_ratio_mode = 1, - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) | BIT(VCS0) | BIT(VCS1) | BIT(VCS2) | BIT(VCS3) | @@ -1052,8 +1056,8 @@ static const struct intel_device_info xehpsdv_info = { XE_HP_FEATURES, \ XE_HPM_FEATURES, \ DGFX_FEATURES, \ - .graphics.rel = 55, \ - .media.rel = 55, \ + 
.__runtime.graphics.ip.rel = 55, \ + .__runtime.media.ip.rel = 55, \ PLATFORM(INTEL_DG2), \ .has_4tile = 1, \ .has_64k_pages = 1, \ @@ -1061,7 +1065,7 @@ static const struct intel_device_info xehpsdv_info = { .has_heci_pxp = 1, \ .needs_compact_pt = 1, \ .has_media_ratio_mode = 1, \ - .platform_engine_mask = \ + .__runtime.platform_engine_mask = \ BIT(RCS0) | BIT(BCS0) | \ BIT(VECS0) | BIT(VECS1) | \ BIT(VCS0) | BIT(VCS2) | \ @@ -1070,7 +1074,7 @@ static const struct intel_device_info xehpsdv_info = { static const struct intel_device_info dg2_info = { DG2_FEATURES, XE_LPD_FEATURES, - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_D), .require_force_probe = 1, }; @@ -1096,12 +1100,12 @@ static const struct intel_device_info pvc_info = { XE_HPC_FEATURES, XE_HPM_FEATURES, DGFX_FEATURES, - .graphics.rel = 60, - .media.rel = 60, + .__runtime.graphics.ip.rel = 60, + .__runtime.media.ip.rel = 60, PLATFORM(INTEL_PONTEVECCHIO), .display = { 0 }, .has_flat_ccs = 0, - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(BCS0) | BIT(VCS0) | BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3), @@ -1110,8 +1114,19 @@ static const struct intel_device_info pvc_info = { #define XE_LPDP_FEATURES \ XE_LPD_FEATURES, \ - .display.ver = 14, \ - .display.has_cdclk_crawl = 1 + .__runtime.display.ip.ver = 14, \ + .display.has_cdclk_crawl = 1, \ + .__runtime.fbc_mask = BIT(INTEL_FBC_A) | BIT(INTEL_FBC_B) + +static const struct intel_gt_definition xelpmp_extra_gt[] = { + { + .type = GT_MEDIA, + .name = "Standalone Media GT", + .gsi_offset = MTL_MEDIA_GSI_BASE, + .engine_mask = BIT(VECS0) | BIT(VCS0) | BIT(VCS2), + }, + {} +}; __maybe_unused static const struct intel_device_info mtl_info = { @@ -1121,15 +1136,16 @@ static const struct intel_device_info mtl_info = { * Real graphics IP version will be obtained from hardware GMD_ID * register. Value provided here is just for sanity checking. */ - .graphics.ver = 12, - .graphics.rel = 70, - .media.ver = 13, + .__runtime.graphics.ip.ver = 12, + .__runtime.graphics.ip.rel = 70, + .__runtime.media.ip.ver = 13, PLATFORM(INTEL_METEORLAKE), .display.has_modular_fia = 1, + .extra_gt_list = xelpmp_extra_gt, .has_flat_ccs = 0, .has_snoop = 1, - .memory_regions = REGION_SMEM | REGION_STOLEN_LMEM, - .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0), + .__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_LMEM, + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0), .require_force_probe = 1, }; @@ -1263,6 +1279,27 @@ static bool force_probe(u16 device_id, const char *devices) return ret; } +bool i915_pci_resource_valid(struct pci_dev *pdev, int bar) +{ + if (!pci_resource_flags(pdev, bar)) + return false; + + if (pci_resource_flags(pdev, bar) & IORESOURCE_UNSET) + return false; + + if (!pci_resource_len(pdev, bar)) + return false; + + return true; +} + +static bool intel_mmio_bar_valid(struct pci_dev *pdev, struct intel_device_info *intel_info) +{ + int gttmmaddr_bar = intel_info->__runtime.graphics.ip.ver == 2 ? 
GEN2_GTTMMADR_BAR : GTTMMADR_BAR; + + return i915_pci_resource_valid(pdev, gttmmaddr_bar); +} + static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct intel_device_info *intel_info = @@ -1288,6 +1325,9 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (PCI_FUNC(pdev->devfn)) return -ENODEV; + if (!intel_mmio_bar_valid(pdev, intel_info)) + return -ENXIO; + /* Detect if we need to wait for other drivers early on */ if (intel_modeset_probe_defer(pdev)) return -EPROBE_DEFER; diff --git a/drivers/gpu/drm/i915/i915_pci.h b/drivers/gpu/drm/i915/i915_pci.h index ee048c238174..8dfe19f9a775 100644 --- a/drivers/gpu/drm/i915/i915_pci.h +++ b/drivers/gpu/drm/i915/i915_pci.h @@ -6,7 +6,13 @@ #ifndef __I915_PCI_H__ #define __I915_PCI_H__ +#include <linux/types.h> + +struct pci_dev; + int i915_pci_register_driver(void); void i915_pci_unregister_driver(void); +bool i915_pci_resource_valid(struct pci_dev *pdev, int bar); + #endif /* __I915_PCI_H__ */ diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index f3c23fe9ad9c..0defbb43ceea 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1376,7 +1376,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream) { struct i915_perf *perf = stream->perf; - BUG_ON(stream != perf->exclusive_stream); + if (WARN_ON(stream != perf->exclusive_stream)) + return; /* * Unset exclusive_stream first, it will be checked while disabling diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3168d7007e10..1a9bd829fc7e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1125,8 +1125,12 @@ #define MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN REG_BIT(16) /* tgl+ */ #define MBUS_DBOX_BW_CREDIT_MASK REG_GENMASK(15, 14) #define MBUS_DBOX_BW_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, x) +#define MBUS_DBOX_BW_4CREDITS_MTL REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, 0x2) +#define MBUS_DBOX_BW_8CREDITS_MTL REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, 0x3) #define MBUS_DBOX_B_CREDIT_MASK REG_GENMASK(12, 8) #define MBUS_DBOX_B_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_B_CREDIT_MASK, x) +#define MBUS_DBOX_I_CREDIT_MASK REG_GENMASK(7, 5) +#define MBUS_DBOX_I_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_I_CREDIT_MASK, x) #define MBUS_DBOX_A_CREDIT_MASK REG_GENMASK(3, 0) #define MBUS_DBOX_A_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_A_CREDIT_MASK, x) @@ -1462,69 +1466,6 @@ #define FBC_REND_CACHE_CLEAN REG_BIT(1) /* - * GPIO regs - */ -#define GPIO(gpio) _MMIO(dev_priv->gpio_mmio_base + 0x5010 + \ - 4 * (gpio)) - -# define GPIO_CLOCK_DIR_MASK (1 << 0) -# define GPIO_CLOCK_DIR_IN (0 << 1) -# define GPIO_CLOCK_DIR_OUT (1 << 1) -# define GPIO_CLOCK_VAL_MASK (1 << 2) -# define GPIO_CLOCK_VAL_OUT (1 << 3) -# define GPIO_CLOCK_VAL_IN (1 << 4) -# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5) -# define GPIO_DATA_DIR_MASK (1 << 8) -# define GPIO_DATA_DIR_IN (0 << 9) -# define GPIO_DATA_DIR_OUT (1 << 9) -# define GPIO_DATA_VAL_MASK (1 << 10) -# define GPIO_DATA_VAL_OUT (1 << 11) -# define GPIO_DATA_VAL_IN (1 << 12) -# define GPIO_DATA_PULLUP_DISABLE (1 << 13) - -#define GMBUS0 _MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */ -#define GMBUS_AKSV_SELECT (1 << 11) -#define GMBUS_RATE_100KHZ (0 << 8) -#define GMBUS_RATE_50KHZ (1 << 8) -#define GMBUS_RATE_400KHZ (2 << 8) /* reserved on Pineview */ -#define GMBUS_RATE_1MHZ (3 << 8) /* reserved on Pineview */ -#define GMBUS_HOLD_EXT (1 << 7) /* 300ns hold time, rsvd 
on Pineview */ -#define GMBUS_BYTE_CNT_OVERRIDE (1 << 6) - -#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */ -#define GMBUS_SW_CLR_INT (1 << 31) -#define GMBUS_SW_RDY (1 << 30) -#define GMBUS_ENT (1 << 29) /* enable timeout */ -#define GMBUS_CYCLE_NONE (0 << 25) -#define GMBUS_CYCLE_WAIT (1 << 25) -#define GMBUS_CYCLE_INDEX (2 << 25) -#define GMBUS_CYCLE_STOP (4 << 25) -#define GMBUS_BYTE_COUNT_SHIFT 16 -#define GMBUS_BYTE_COUNT_MAX 256U -#define GEN9_GMBUS_BYTE_COUNT_MAX 511U -#define GMBUS_SLAVE_INDEX_SHIFT 8 -#define GMBUS_SLAVE_ADDR_SHIFT 1 -#define GMBUS_SLAVE_READ (1 << 0) -#define GMBUS_SLAVE_WRITE (0 << 0) -#define GMBUS2 _MMIO(dev_priv->gpio_mmio_base + 0x5108) /* status */ -#define GMBUS_INUSE (1 << 15) -#define GMBUS_HW_WAIT_PHASE (1 << 14) -#define GMBUS_STALL_TIMEOUT (1 << 13) -#define GMBUS_INT (1 << 12) -#define GMBUS_HW_RDY (1 << 11) -#define GMBUS_SATOER (1 << 10) -#define GMBUS_ACTIVE (1 << 9) -#define GMBUS3 _MMIO(dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */ -#define GMBUS4 _MMIO(dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */ -#define GMBUS_SLAVE_TIMEOUT_EN (1 << 4) -#define GMBUS_NAK_EN (1 << 3) -#define GMBUS_IDLE_EN (1 << 2) -#define GMBUS_HW_WAIT_EN (1 << 1) -#define GMBUS_HW_RDY_EN (1 << 0) -#define GMBUS5 _MMIO(dev_priv->gpio_mmio_base + 0x5120) /* byte index */ -#define GMBUS_2BYTE_INDEX_EN (1 << 31) - -/* * Clock control & power management */ #define _DPLL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x6014) @@ -1700,7 +1641,7 @@ #define DSTATE_PLL_D3_OFF (1 << 3) #define DSTATE_GFX_CLOCK_GATING (1 << 1) #define DSTATE_DOT_CLOCK_GATING (1 << 0) -#define DSPCLK_GATE_D _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x6200) +#define DSPCLK_GATE_D(__i915) _MMIO(DISPLAY_MMIO_BASE(__i915) + 0x6200) # define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */ # define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */ # define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */ @@ -1857,14 +1798,14 @@ #define GT0_PERF_LIMIT_REASONS _MMIO(0x1381a8) #define GT0_PERF_LIMIT_REASONS_MASK 0xde3 -#define PROCHOT_MASK REG_BIT(1) -#define THERMAL_LIMIT_MASK REG_BIT(2) -#define RATL_MASK REG_BIT(6) -#define VR_THERMALERT_MASK REG_BIT(7) -#define VR_TDC_MASK REG_BIT(8) -#define POWER_LIMIT_4_MASK REG_BIT(9) -#define POWER_LIMIT_1_MASK REG_BIT(11) -#define POWER_LIMIT_2_MASK REG_BIT(12) +#define PROCHOT_MASK REG_BIT(0) +#define THERMAL_LIMIT_MASK REG_BIT(1) +#define RATL_MASK REG_BIT(5) +#define VR_THERMALERT_MASK REG_BIT(6) +#define VR_TDC_MASK REG_BIT(7) +#define POWER_LIMIT_4_MASK REG_BIT(8) +#define POWER_LIMIT_1_MASK REG_BIT(10) +#define POWER_LIMIT_2_MASK REG_BIT(11) #define CHV_CLK_CTL1 _MMIO(0x101100) #define VLV_CLK_CTL2 _MMIO(0x101104) @@ -1916,6 +1857,13 @@ #define CLKGATE_DIS_PSL(pipe) \ _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_A, _CLKGATE_DIS_PSL_B) +#define _CLKGATE_DIS_PSL_EXT_A 0x4654C +#define _CLKGATE_DIS_PSL_EXT_B 0x46550 +#define PIPEDMC_GATING_DIS REG_BIT(12) + +#define CLKGATE_DIS_PSL_EXT(pipe) \ + _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_EXT_A, _CLKGATE_DIS_PSL_EXT_B) + /* * Display engine regs */ @@ -2822,7 +2770,7 @@ #define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE) #define PCH_PPS_BASE 0xC7200 -#define _MMIO_PPS(pps_idx, reg) _MMIO(dev_priv->pps_mmio_base - \ +#define _MMIO_PPS(pps_idx, reg) _MMIO(dev_priv->display.pps.mmio_base - \ PPS_BASE + (reg) + \ (pps_idx) * 0x100) @@ -2918,118 +2866,6 @@ #define PFIT_AUTO_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61238) -#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250) 
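/*
 * Illustration only -- not part of the patch. The GT0_PERF_LIMIT_REASONS
 * fix above and the CDCLK_CTL conversion later in this series replace
 * open-coded shifts with REG_BIT()/REG_GENMASK()/REG_FIELD_PREP(). The
 * SKETCH_* macros below are simplified userspace stand-ins invented for
 * this sketch (not kernel API), showing the semantics: REG_BIT(n) sets
 * bit n itself, which is why moving PROCHOT_MASK from REG_BIT(1) to
 * REG_BIT(0) is an off-by-one fix for a bit-0 field. Assumes a
 * GCC/Clang-style __builtin_ctz().
 */
#include <assert.h>
#include <stdint.h>

#define SKETCH_BIT(n)        (UINT32_C(1) << (n))
#define SKETCH_GENMASK(h, l) (((~UINT32_C(0)) << (l)) & \
                              (~UINT32_C(0) >> (31 - (h))))
#define SKETCH_FIELD_PREP(mask, val) \
        (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

int main(void)
{
        /* PROCHOT lives in bit 0: REG_BIT(0) == 0x1, not REG_BIT(1) == 0x2. */
        assert(SKETCH_BIT(0) == 0x1 && SKETCH_BIT(1) == 0x2);

        /* CDCLK_FREQ_SEL_MASK: REG_GENMASK(27, 26) equals the old (3 << 26)... */
        assert(SKETCH_GENMASK(27, 26) == (3u << 26));

        /* ...and REG_FIELD_PREP(mask, 1) reproduces the old (1 << 26) encoding. */
        assert(SKETCH_FIELD_PREP(SKETCH_GENMASK(27, 26), 1) == (1u << 26));

        return 0;
}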
-#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350) -#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \ - _VLV_BLC_PWM_CTL2_B) - -#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254) -#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354) -#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \ - _VLV_BLC_PWM_CTL_B) - -#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260) -#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360) -#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \ - _VLV_BLC_HIST_CTL_B) - -/* Backlight control */ -#define BLC_PWM_CTL2 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */ -#define BLM_PWM_ENABLE (1 << 31) -#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */ -#define BLM_PIPE_SELECT (1 << 29) -#define BLM_PIPE_SELECT_IVB (3 << 29) -#define BLM_PIPE_A (0 << 29) -#define BLM_PIPE_B (1 << 29) -#define BLM_PIPE_C (2 << 29) /* ivb + */ -#define BLM_TRANSCODER_A BLM_PIPE_A /* hsw */ -#define BLM_TRANSCODER_B BLM_PIPE_B -#define BLM_TRANSCODER_C BLM_PIPE_C -#define BLM_TRANSCODER_EDP (3 << 29) -#define BLM_PIPE(pipe) ((pipe) << 29) -#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */ -#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26) -#define BLM_PHASE_IN_ENABLE (1 << 25) -#define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24) -#define BLM_PHASE_IN_TIME_BASE_SHIFT (16) -#define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16) -#define BLM_PHASE_IN_COUNT_SHIFT (8) -#define BLM_PHASE_IN_COUNT_MASK (0xff << 8) -#define BLM_PHASE_IN_INCR_SHIFT (0) -#define BLM_PHASE_IN_INCR_MASK (0xff << 0) -#define BLC_PWM_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254) -/* - * This is the most significant 15 bits of the number of backlight cycles in a - * complete cycle of the modulated backlight control. - * - * The actual value is this field multiplied by two. - */ -#define BACKLIGHT_MODULATION_FREQ_SHIFT (17) -#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) -#define BLM_LEGACY_MODE (1 << 16) /* gen2 only */ -/* - * This is the number of cycles out of the backlight modulation cycle for which - * the backlight is on. - * - * This field must be no greater than the number of cycles in the complete - * backlight modulation cycle. - */ -#define BACKLIGHT_DUTY_CYCLE_SHIFT (0) -#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) -#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe) -#define BLM_POLARITY_PNV (1 << 0) /* pnv only */ - -#define BLC_HIST_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260) -#define BLM_HISTOGRAM_ENABLE (1 << 31) - -/* New registers for PCH-split platforms. Safe where new bits show up, the - * register layout machtes with gen4 BLC_PWM_CTL[12]. */ -#define BLC_PWM_CPU_CTL2 _MMIO(0x48250) -#define BLC_PWM_CPU_CTL _MMIO(0x48254) - -#define HSW_BLC_PWM2_CTL _MMIO(0x48350) - -/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is - * like the normal CTL from gen4 and earlier. Hooray for confusing naming. 
*/ -#define BLC_PWM_PCH_CTL1 _MMIO(0xc8250) -#define BLM_PCH_PWM_ENABLE (1 << 31) -#define BLM_PCH_OVERRIDE_ENABLE (1 << 30) -#define BLM_PCH_POLARITY (1 << 29) -#define BLC_PWM_PCH_CTL2 _MMIO(0xc8254) - -#define UTIL_PIN_CTL _MMIO(0x48400) -#define UTIL_PIN_ENABLE (1 << 31) -#define UTIL_PIN_PIPE_MASK (3 << 29) -#define UTIL_PIN_PIPE(x) ((x) << 29) -#define UTIL_PIN_MODE_MASK (0xf << 24) -#define UTIL_PIN_MODE_DATA (0 << 24) -#define UTIL_PIN_MODE_PWM (1 << 24) -#define UTIL_PIN_MODE_VBLANK (4 << 24) -#define UTIL_PIN_MODE_VSYNC (5 << 24) -#define UTIL_PIN_MODE_EYE_LEVEL (8 << 24) -#define UTIL_PIN_OUTPUT_DATA (1 << 23) -#define UTIL_PIN_POLARITY (1 << 22) -#define UTIL_PIN_DIRECTION_INPUT (1 << 19) -#define UTIL_PIN_INPUT_DATA (1 << 16) - -/* BXT backlight register definition. */ -#define _BXT_BLC_PWM_CTL1 0xC8250 -#define BXT_BLC_PWM_ENABLE (1 << 31) -#define BXT_BLC_PWM_POLARITY (1 << 29) -#define _BXT_BLC_PWM_FREQ1 0xC8254 -#define _BXT_BLC_PWM_DUTY1 0xC8258 - -#define _BXT_BLC_PWM_CTL2 0xC8350 -#define _BXT_BLC_PWM_FREQ2 0xC8354 -#define _BXT_BLC_PWM_DUTY2 0xC8358 - -#define BXT_BLC_PWM_CTL(controller) _MMIO_PIPE(controller, \ - _BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2) -#define BXT_BLC_PWM_FREQ(controller) _MMIO_PIPE(controller, \ - _BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2) -#define BXT_BLC_PWM_DUTY(controller) _MMIO_PIPE(controller, \ - _BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2) - #define PCH_GTC_CTL _MMIO(0xe7000) #define PCH_GTC_ENABLE (1 << 31) @@ -3619,6 +3455,34 @@ #define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL) #define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */ +#define _XELPDP_USBC1_AUX_CH_CTL 0x16F210 +#define _XELPDP_USBC2_AUX_CH_CTL 0x16F410 +#define _XELPDP_USBC3_AUX_CH_CTL 0x16F610 +#define _XELPDP_USBC4_AUX_CH_CTL 0x16F810 + +#define XELPDP_DP_AUX_CH_CTL(aux_ch) _MMIO(_PICK(aux_ch, \ + _DPA_AUX_CH_CTL, \ + _DPB_AUX_CH_CTL, \ + 0, /* port/aux_ch C is non-existent */ \ + _XELPDP_USBC1_AUX_CH_CTL, \ + _XELPDP_USBC2_AUX_CH_CTL, \ + _XELPDP_USBC3_AUX_CH_CTL, \ + _XELPDP_USBC4_AUX_CH_CTL)) + +#define _XELPDP_USBC1_AUX_CH_DATA1 0x16F214 +#define _XELPDP_USBC2_AUX_CH_DATA1 0x16F414 +#define _XELPDP_USBC3_AUX_CH_DATA1 0x16F614 +#define _XELPDP_USBC4_AUX_CH_DATA1 0x16F814 + +#define XELPDP_DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PICK(aux_ch, \ + _DPA_AUX_CH_DATA1, \ + _DPB_AUX_CH_DATA1, \ + 0, /* port/aux_ch C is non-existent */ \ + _XELPDP_USBC1_AUX_CH_DATA1, \ + _XELPDP_USBC2_AUX_CH_DATA1, \ + _XELPDP_USBC3_AUX_CH_DATA1, \ + _XELPDP_USBC4_AUX_CH_DATA1) + (i) * 4) + #define DP_AUX_CH_CTL_SEND_BUSY (1 << 31) #define DP_AUX_CH_CTL_DONE (1 << 30) #define DP_AUX_CH_CTL_INTERRUPT (1 << 29) @@ -3631,6 +3495,8 @@ #define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25) #define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20) #define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20 +#define XELPDP_DP_AUX_CH_CTL_POWER_REQUEST REG_BIT(19) +#define XELPDP_DP_AUX_CH_CTL_POWER_STATUS REG_BIT(18) #define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16) #define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16 #define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15) @@ -5862,6 +5728,13 @@ [TRANSCODER_B] = _CHICKEN_TRANS_B, \ [TRANSCODER_C] = _CHICKEN_TRANS_C, \ [TRANSCODER_D] = _CHICKEN_TRANS_D)) + +#define _MTL_CHICKEN_TRANS_A 0x604e0 +#define _MTL_CHICKEN_TRANS_B 0x614e0 +#define MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \ + _MTL_CHICKEN_TRANS_A, \ + _MTL_CHICKEN_TRANS_B) + #define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) #define 
HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x) #define VSC_DATA_SEL_SOFTWARE_CONTROL REG_BIT(25) /* GLK */ @@ -5926,7 +5799,8 @@ _BW_BUDDY1_PAGE_MASK)) #define HSW_NDE_RSTWRN_OPT _MMIO(0x46408) -#define RESET_PCH_HANDSHAKE_ENABLE (1 << 4) +#define MTL_RESET_PICA_HANDSHAKE_EN REG_BIT(6) +#define RESET_PCH_HANDSHAKE_ENABLE REG_BIT(4) #define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430) #define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30) @@ -6718,10 +6592,10 @@ #define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) #define BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ 0x18 #define GEN9_PCODE_READ_MEM_LATENCY 0x6 -#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF -#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8 -#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16 -#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24 +#define GEN9_MEM_LATENCY_LEVEL_3_7_MASK REG_GENMASK(31, 24) +#define GEN9_MEM_LATENCY_LEVEL_2_6_MASK REG_GENMASK(23, 16) +#define GEN9_MEM_LATENCY_LEVEL_1_5_MASK REG_GENMASK(15, 8) +#define GEN9_MEM_LATENCY_LEVEL_0_4_MASK REG_GENMASK(7, 0) #define SKL_PCODE_LOAD_HDCP_KEYS 0x5 #define SKL_PCODE_CDCLK_CONTROL 0x7 #define SKL_CDCLK_PREPARE_FOR_CHANGE 0x3 @@ -6937,265 +6811,6 @@ enum skl_power_gate { #define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7) #define ICL_AUX_ANAOVRD1_ENABLE (1 << 0) -/* HDCP Key Registers */ -#define HDCP_KEY_CONF _MMIO(0x66c00) -#define HDCP_AKSV_SEND_TRIGGER BIT(31) -#define HDCP_CLEAR_KEYS_TRIGGER BIT(30) -#define HDCP_KEY_LOAD_TRIGGER BIT(8) -#define HDCP_KEY_STATUS _MMIO(0x66c04) -#define HDCP_FUSE_IN_PROGRESS BIT(7) -#define HDCP_FUSE_ERROR BIT(6) -#define HDCP_FUSE_DONE BIT(5) -#define HDCP_KEY_LOAD_STATUS BIT(1) -#define HDCP_KEY_LOAD_DONE BIT(0) -#define HDCP_AKSV_LO _MMIO(0x66c10) -#define HDCP_AKSV_HI _MMIO(0x66c14) - -/* HDCP Repeater Registers */ -#define HDCP_REP_CTL _MMIO(0x66d00) -#define HDCP_TRANSA_REP_PRESENT BIT(31) -#define HDCP_TRANSB_REP_PRESENT BIT(30) -#define HDCP_TRANSC_REP_PRESENT BIT(29) -#define HDCP_TRANSD_REP_PRESENT BIT(28) -#define HDCP_DDIB_REP_PRESENT BIT(30) -#define HDCP_DDIA_REP_PRESENT BIT(29) -#define HDCP_DDIC_REP_PRESENT BIT(28) -#define HDCP_DDID_REP_PRESENT BIT(27) -#define HDCP_DDIF_REP_PRESENT BIT(26) -#define HDCP_DDIE_REP_PRESENT BIT(25) -#define HDCP_TRANSA_SHA1_M0 (1 << 20) -#define HDCP_TRANSB_SHA1_M0 (2 << 20) -#define HDCP_TRANSC_SHA1_M0 (3 << 20) -#define HDCP_TRANSD_SHA1_M0 (4 << 20) -#define HDCP_DDIB_SHA1_M0 (1 << 20) -#define HDCP_DDIA_SHA1_M0 (2 << 20) -#define HDCP_DDIC_SHA1_M0 (3 << 20) -#define HDCP_DDID_SHA1_M0 (4 << 20) -#define HDCP_DDIF_SHA1_M0 (5 << 20) -#define HDCP_DDIE_SHA1_M0 (6 << 20) /* Bspec says 5? 
*/ -#define HDCP_SHA1_BUSY BIT(16) -#define HDCP_SHA1_READY BIT(17) -#define HDCP_SHA1_COMPLETE BIT(18) -#define HDCP_SHA1_V_MATCH BIT(19) -#define HDCP_SHA1_TEXT_32 (1 << 1) -#define HDCP_SHA1_COMPLETE_HASH (2 << 1) -#define HDCP_SHA1_TEXT_24 (4 << 1) -#define HDCP_SHA1_TEXT_16 (5 << 1) -#define HDCP_SHA1_TEXT_8 (6 << 1) -#define HDCP_SHA1_TEXT_0 (7 << 1) -#define HDCP_SHA_V_PRIME_H0 _MMIO(0x66d04) -#define HDCP_SHA_V_PRIME_H1 _MMIO(0x66d08) -#define HDCP_SHA_V_PRIME_H2 _MMIO(0x66d0C) -#define HDCP_SHA_V_PRIME_H3 _MMIO(0x66d10) -#define HDCP_SHA_V_PRIME_H4 _MMIO(0x66d14) -#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + (h) * 4)) -#define HDCP_SHA_TEXT _MMIO(0x66d18) - -/* HDCP Auth Registers */ -#define _PORTA_HDCP_AUTHENC 0x66800 -#define _PORTB_HDCP_AUTHENC 0x66500 -#define _PORTC_HDCP_AUTHENC 0x66600 -#define _PORTD_HDCP_AUTHENC 0x66700 -#define _PORTE_HDCP_AUTHENC 0x66A00 -#define _PORTF_HDCP_AUTHENC 0x66900 -#define _PORT_HDCP_AUTHENC(port, x) _MMIO(_PICK(port, \ - _PORTA_HDCP_AUTHENC, \ - _PORTB_HDCP_AUTHENC, \ - _PORTC_HDCP_AUTHENC, \ - _PORTD_HDCP_AUTHENC, \ - _PORTE_HDCP_AUTHENC, \ - _PORTF_HDCP_AUTHENC) + (x)) -#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0) -#define _TRANSA_HDCP_CONF 0x66400 -#define _TRANSB_HDCP_CONF 0x66500 -#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \ - _TRANSB_HDCP_CONF) -#define HDCP_CONF(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP_CONF(trans) : \ - PORT_HDCP_CONF(port)) - -#define HDCP_CONF_CAPTURE_AN BIT(0) -#define HDCP_CONF_AUTH_AND_ENC (BIT(1) | BIT(0)) -#define PORT_HDCP_ANINIT(port) _PORT_HDCP_AUTHENC(port, 0x4) -#define _TRANSA_HDCP_ANINIT 0x66404 -#define _TRANSB_HDCP_ANINIT 0x66504 -#define TRANS_HDCP_ANINIT(trans) _MMIO_TRANS(trans, \ - _TRANSA_HDCP_ANINIT, \ - _TRANSB_HDCP_ANINIT) -#define HDCP_ANINIT(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP_ANINIT(trans) : \ - PORT_HDCP_ANINIT(port)) - -#define PORT_HDCP_ANLO(port) _PORT_HDCP_AUTHENC(port, 0x8) -#define _TRANSA_HDCP_ANLO 0x66408 -#define _TRANSB_HDCP_ANLO 0x66508 -#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \ - _TRANSB_HDCP_ANLO) -#define HDCP_ANLO(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP_ANLO(trans) : \ - PORT_HDCP_ANLO(port)) - -#define PORT_HDCP_ANHI(port) _PORT_HDCP_AUTHENC(port, 0xC) -#define _TRANSA_HDCP_ANHI 0x6640C -#define _TRANSB_HDCP_ANHI 0x6650C -#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \ - _TRANSB_HDCP_ANHI) -#define HDCP_ANHI(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP_ANHI(trans) : \ - PORT_HDCP_ANHI(port)) - -#define PORT_HDCP_BKSVLO(port) _PORT_HDCP_AUTHENC(port, 0x10) -#define _TRANSA_HDCP_BKSVLO 0x66410 -#define _TRANSB_HDCP_BKSVLO 0x66510 -#define TRANS_HDCP_BKSVLO(trans) _MMIO_TRANS(trans, \ - _TRANSA_HDCP_BKSVLO, \ - _TRANSB_HDCP_BKSVLO) -#define HDCP_BKSVLO(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP_BKSVLO(trans) : \ - PORT_HDCP_BKSVLO(port)) - -#define PORT_HDCP_BKSVHI(port) _PORT_HDCP_AUTHENC(port, 0x14) -#define _TRANSA_HDCP_BKSVHI 0x66414 -#define _TRANSB_HDCP_BKSVHI 0x66514 -#define TRANS_HDCP_BKSVHI(trans) _MMIO_TRANS(trans, \ - _TRANSA_HDCP_BKSVHI, \ - _TRANSB_HDCP_BKSVHI) -#define HDCP_BKSVHI(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? 
\ - TRANS_HDCP_BKSVHI(trans) : \ - PORT_HDCP_BKSVHI(port)) - -#define PORT_HDCP_RPRIME(port) _PORT_HDCP_AUTHENC(port, 0x18) -#define _TRANSA_HDCP_RPRIME 0x66418 -#define _TRANSB_HDCP_RPRIME 0x66518 -#define TRANS_HDCP_RPRIME(trans) _MMIO_TRANS(trans, \ - _TRANSA_HDCP_RPRIME, \ - _TRANSB_HDCP_RPRIME) -#define HDCP_RPRIME(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP_RPRIME(trans) : \ - PORT_HDCP_RPRIME(port)) - -#define PORT_HDCP_STATUS(port) _PORT_HDCP_AUTHENC(port, 0x1C) -#define _TRANSA_HDCP_STATUS 0x6641C -#define _TRANSB_HDCP_STATUS 0x6651C -#define TRANS_HDCP_STATUS(trans) _MMIO_TRANS(trans, \ - _TRANSA_HDCP_STATUS, \ - _TRANSB_HDCP_STATUS) -#define HDCP_STATUS(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP_STATUS(trans) : \ - PORT_HDCP_STATUS(port)) - -#define HDCP_STATUS_STREAM_A_ENC BIT(31) -#define HDCP_STATUS_STREAM_B_ENC BIT(30) -#define HDCP_STATUS_STREAM_C_ENC BIT(29) -#define HDCP_STATUS_STREAM_D_ENC BIT(28) -#define HDCP_STATUS_AUTH BIT(21) -#define HDCP_STATUS_ENC BIT(20) -#define HDCP_STATUS_RI_MATCH BIT(19) -#define HDCP_STATUS_R0_READY BIT(18) -#define HDCP_STATUS_AN_READY BIT(17) -#define HDCP_STATUS_CIPHER BIT(16) -#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff) - -/* HDCP2.2 Registers */ -#define _PORTA_HDCP2_BASE 0x66800 -#define _PORTB_HDCP2_BASE 0x66500 -#define _PORTC_HDCP2_BASE 0x66600 -#define _PORTD_HDCP2_BASE 0x66700 -#define _PORTE_HDCP2_BASE 0x66A00 -#define _PORTF_HDCP2_BASE 0x66900 -#define _PORT_HDCP2_BASE(port, x) _MMIO(_PICK((port), \ - _PORTA_HDCP2_BASE, \ - _PORTB_HDCP2_BASE, \ - _PORTC_HDCP2_BASE, \ - _PORTD_HDCP2_BASE, \ - _PORTE_HDCP2_BASE, \ - _PORTF_HDCP2_BASE) + (x)) - -#define PORT_HDCP2_AUTH(port) _PORT_HDCP2_BASE(port, 0x98) -#define _TRANSA_HDCP2_AUTH 0x66498 -#define _TRANSB_HDCP2_AUTH 0x66598 -#define TRANS_HDCP2_AUTH(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_AUTH, \ - _TRANSB_HDCP2_AUTH) -#define AUTH_LINK_AUTHENTICATED BIT(31) -#define AUTH_LINK_TYPE BIT(30) -#define AUTH_FORCE_CLR_INPUTCTR BIT(19) -#define AUTH_CLR_KEYS BIT(18) -#define HDCP2_AUTH(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP2_AUTH(trans) : \ - PORT_HDCP2_AUTH(port)) - -#define PORT_HDCP2_CTL(port) _PORT_HDCP2_BASE(port, 0xB0) -#define _TRANSA_HDCP2_CTL 0x664B0 -#define _TRANSB_HDCP2_CTL 0x665B0 -#define TRANS_HDCP2_CTL(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_CTL, \ - _TRANSB_HDCP2_CTL) -#define CTL_LINK_ENCRYPTION_REQ BIT(31) -#define HDCP2_CTL(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP2_CTL(trans) : \ - PORT_HDCP2_CTL(port)) - -#define PORT_HDCP2_STATUS(port) _PORT_HDCP2_BASE(port, 0xB4) -#define _TRANSA_HDCP2_STATUS 0x664B4 -#define _TRANSB_HDCP2_STATUS 0x665B4 -#define TRANS_HDCP2_STATUS(trans) _MMIO_TRANS(trans, \ - _TRANSA_HDCP2_STATUS, \ - _TRANSB_HDCP2_STATUS) -#define LINK_TYPE_STATUS BIT(22) -#define LINK_AUTH_STATUS BIT(21) -#define LINK_ENCRYPTION_STATUS BIT(20) -#define HDCP2_STATUS(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? 
\ - TRANS_HDCP2_STATUS(trans) : \ - PORT_HDCP2_STATUS(port)) - -#define _PIPEA_HDCP2_STREAM_STATUS 0x668C0 -#define _PIPEB_HDCP2_STREAM_STATUS 0x665C0 -#define _PIPEC_HDCP2_STREAM_STATUS 0x666C0 -#define _PIPED_HDCP2_STREAM_STATUS 0x667C0 -#define PIPE_HDCP2_STREAM_STATUS(pipe) _MMIO(_PICK((pipe), \ - _PIPEA_HDCP2_STREAM_STATUS, \ - _PIPEB_HDCP2_STREAM_STATUS, \ - _PIPEC_HDCP2_STREAM_STATUS, \ - _PIPED_HDCP2_STREAM_STATUS)) - -#define _TRANSA_HDCP2_STREAM_STATUS 0x664C0 -#define _TRANSB_HDCP2_STREAM_STATUS 0x665C0 -#define TRANS_HDCP2_STREAM_STATUS(trans) _MMIO_TRANS(trans, \ - _TRANSA_HDCP2_STREAM_STATUS, \ - _TRANSB_HDCP2_STREAM_STATUS) -#define STREAM_ENCRYPTION_STATUS BIT(31) -#define STREAM_TYPE_STATUS BIT(30) -#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP2_STREAM_STATUS(trans) : \ - PIPE_HDCP2_STREAM_STATUS(pipe)) - -#define _PORTA_HDCP2_AUTH_STREAM 0x66F00 -#define _PORTB_HDCP2_AUTH_STREAM 0x66F04 -#define PORT_HDCP2_AUTH_STREAM(port) _MMIO_PORT(port, \ - _PORTA_HDCP2_AUTH_STREAM, \ - _PORTB_HDCP2_AUTH_STREAM) -#define _TRANSA_HDCP2_AUTH_STREAM 0x66F00 -#define _TRANSB_HDCP2_AUTH_STREAM 0x66F04 -#define TRANS_HDCP2_AUTH_STREAM(trans) _MMIO_TRANS(trans, \ - _TRANSA_HDCP2_AUTH_STREAM, \ - _TRANSB_HDCP2_AUTH_STREAM) -#define AUTH_STREAM_TYPE BIT(31) -#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP2_AUTH_STREAM(trans) : \ - PORT_HDCP2_AUTH_STREAM(port)) - /* Per-pipe DDI Function Control */ #define _TRANS_DDI_FUNC_CTL_A 0x60400 #define _TRANS_DDI_FUNC_CTL_B 0x61400 @@ -7503,16 +7118,16 @@ enum skl_power_gate { /* CDCLK_CTL */ #define CDCLK_CTL _MMIO(0x46000) -#define CDCLK_FREQ_SEL_MASK (3 << 26) -#define CDCLK_FREQ_450_432 (0 << 26) -#define CDCLK_FREQ_540 (1 << 26) -#define CDCLK_FREQ_337_308 (2 << 26) -#define CDCLK_FREQ_675_617 (3 << 26) -#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3 << 22) -#define BXT_CDCLK_CD2X_DIV_SEL_1 (0 << 22) -#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1 << 22) -#define BXT_CDCLK_CD2X_DIV_SEL_2 (2 << 22) -#define BXT_CDCLK_CD2X_DIV_SEL_4 (3 << 22) +#define CDCLK_FREQ_SEL_MASK REG_GENMASK(27, 26) +#define CDCLK_FREQ_450_432 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 0) +#define CDCLK_FREQ_540 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 1) +#define CDCLK_FREQ_337_308 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 2) +#define CDCLK_FREQ_675_617 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 3) +#define BXT_CDCLK_CD2X_DIV_SEL_MASK REG_GENMASK(23, 22) +#define BXT_CDCLK_CD2X_DIV_SEL_1 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 0) +#define BXT_CDCLK_CD2X_DIV_SEL_1_5 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 1) +#define BXT_CDCLK_CD2X_DIV_SEL_2 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 2) +#define BXT_CDCLK_CD2X_DIV_SEL_4 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 3) #define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20) #define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19) #define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3) @@ -8367,6 +7982,7 @@ enum skl_power_gate { #define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC) +#define DSC_ALT_ICH_SEL (1 << 20) #define DSC_VBR_ENABLE (1 << 19) #define DSC_422_ENABLE (1 << 18) #define DSC_COLOR_SPACE_CONVERSION (1 << 17) @@ -8717,4 +8333,27 @@ enum skl_power_gate { #define GEN12_CULLBIT2 _MMIO(0x7030) #define GEN12_STATE_ACK_DEBUG _MMIO(0x20BC) +#define MTL_LATENCY_LP0_LP1 _MMIO(0x45780) +#define MTL_LATENCY_LP2_LP3 _MMIO(0x45784) +#define MTL_LATENCY_LP4_LP5 _MMIO(0x45788) 
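Each of the three MTL_LATENCY_LP* registers above packs two memory latency levels into one 32-bit word, and the MTL_LATENCY_LEVEL_EVEN/ODD masks just below extract them; the CDCLK_CTL hunk earlier in this patch converts to the same REG_GENMASK()/REG_FIELD_PREP() idiom in the encode direction. A minimal stand-alone sketch of the decode (plain C with the masks hardcoded and an invented register value; the kernel's REG_FIELD_GET() performs the same mask-and-shift plus compile-time mask checks):

#include <stdint.h>
#include <stdio.h>

#define LATENCY_LEVEL_EVEN_MASK 0x00001fffu /* REG_GENMASK(12, 0) */
#define LATENCY_LEVEL_ODD_MASK  0x1fff0000u /* REG_GENMASK(28, 16) */

/* What REG_FIELD_GET() boils down to: mask, then shift down by the
 * position of the mask's least significant set bit (GCC/Clang builtin). */
static uint32_t field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	uint32_t reg = (12u << 16) | 4u; /* e.g. odd level 12, even level 4 */

	printf("even latency level: %u\n", field_get(LATENCY_LEVEL_EVEN_MASK, reg));
	printf("odd latency level:  %u\n", field_get(LATENCY_LEVEL_ODD_MASK, reg));
	return 0;
}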
+#define MTL_LATENCY_LEVEL_EVEN_MASK REG_GENMASK(12, 0) +#define MTL_LATENCY_LEVEL_ODD_MASK REG_GENMASK(28, 16) + +#define MTL_LATENCY_SAGV _MMIO(0x4578b) +#define MTL_LATENCY_QCLK_SAGV REG_GENMASK(12, 0) + +#define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700) +#define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8) +#define MTL_N_OF_POPULATED_CH_MASK REG_GENMASK(7, 4) +#define MTL_DDR_TYPE_MASK REG_GENMASK(3, 0) + +#define MTL_MEM_SS_INFO_QGV_POINT_LOW(point) _MMIO(0x45710 + (point) * 2) +#define MTL_TRCD_MASK REG_GENMASK(31, 24) +#define MTL_TRP_MASK REG_GENMASK(23, 16) +#define MTL_DCLK_MASK REG_GENMASK(15, 0) + +#define MTL_MEM_SS_INFO_QGV_POINT_HIGH(point) _MMIO(0x45714 + (point) * 2) +#define MTL_TRAS_MASK REG_GENMASK(16, 8) +#define MTL_TRDPRE_MASK REG_GENMASK(7, 0) + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index ae984c66c48a..6fc0d1b89690 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -241,8 +241,6 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence, const char *name, struct lock_class_key *key) { - BUG_ON(!fn); - __init_waitqueue_head(&fence->wait, name, key); fence->fn = fn; #ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index a7c603bc1b01..619fc5a22f0c 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h @@ -48,11 +48,15 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence, do { \ static struct lock_class_key __key; \ \ + BUILD_BUG_ON((fn) == NULL); \ __i915_sw_fence_init((fence), (fn), #fence, &__key); \ } while (0) #else #define i915_sw_fence_init(fence, fn) \ - __i915_sw_fence_init((fence), (fn), NULL, NULL) +do { \ + BUILD_BUG_ON((fn) == NULL); \ + __i915_sw_fence_init((fence), (fn), NULL, NULL); \ +} while (0) #endif void i915_sw_fence_reinit(struct i915_sw_fence *fence); diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index c10d68cdc3ca..6c14d13364bf 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -360,10 +360,6 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) #define KHz(x) (1000 * (x)) #define MHz(x) KHz(1000 * (x)) -#define KBps(x) (1000 * (x)) -#define MBps(x) KBps(1000 * (x)) -#define GBps(x) ((u64)1000 * MBps((x))) - void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint); static inline void __add_taint_for_CI(unsigned int taint) { diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index d98fbbd589aa..1434dc33cf49 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -88,46 +88,57 @@ const char *intel_platform_name(enum intel_platform platform) return platform_names[platform]; } -void intel_device_info_print_static(const struct intel_device_info *info, - struct drm_printer *p) +void intel_device_info_print(const struct intel_device_info *info, + const struct intel_runtime_info *runtime, + struct drm_printer *p) { - if (info->graphics.rel) - drm_printf(p, "graphics version: %u.%02u\n", info->graphics.ver, - info->graphics.rel); + if (runtime->graphics.ip.rel) + drm_printf(p, "graphics version: %u.%02u\n", + runtime->graphics.ip.ver, + runtime->graphics.ip.rel); else - drm_printf(p, "graphics version: %u\n", info->graphics.ver); + drm_printf(p, "graphics version: %u\n", + runtime->graphics.ip.ver); - 
if (info->media.rel) - drm_printf(p, "media version: %u.%02u\n", info->media.ver, info->media.rel); + if (runtime->media.ip.rel) + drm_printf(p, "media version: %u.%02u\n", + runtime->media.ip.ver, + runtime->media.ip.rel); else - drm_printf(p, "media version: %u\n", info->media.ver); + drm_printf(p, "media version: %u\n", + runtime->media.ip.ver); - if (info->display.rel) - drm_printf(p, "display version: %u.%02u\n", info->display.ver, info->display.rel); + if (runtime->display.ip.rel) + drm_printf(p, "display version: %u.%02u\n", + runtime->display.ip.ver, + runtime->display.ip.rel); else - drm_printf(p, "display version: %u\n", info->display.ver); + drm_printf(p, "display version: %u\n", + runtime->display.ip.ver); drm_printf(p, "gt: %d\n", info->gt); - drm_printf(p, "memory-regions: %x\n", info->memory_regions); - drm_printf(p, "page-sizes: %x\n", info->page_sizes); + drm_printf(p, "memory-regions: %x\n", runtime->memory_regions); + drm_printf(p, "page-sizes: %x\n", runtime->page_sizes); drm_printf(p, "platform: %s\n", intel_platform_name(info->platform)); - drm_printf(p, "ppgtt-size: %d\n", info->ppgtt_size); - drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type); + drm_printf(p, "ppgtt-size: %d\n", runtime->ppgtt_size); + drm_printf(p, "ppgtt-type: %d\n", runtime->ppgtt_type); drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size); #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->name)) DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG); #undef PRINT_FLAG + drm_printf(p, "has_pooled_eu: %s\n", str_yes_no(runtime->has_pooled_eu)); + #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->display.name)) DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG); #undef PRINT_FLAG -} -void intel_device_info_print_runtime(const struct intel_runtime_info *info, - struct drm_printer *p) -{ - drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq); + drm_printf(p, "has_hdcp: %s\n", str_yes_no(runtime->has_hdcp)); + drm_printf(p, "has_dmc: %s\n", str_yes_no(runtime->has_dmc)); + drm_printf(p, "has_dsc: %s\n", str_yes_no(runtime->has_dsc)); + + drm_printf(p, "rawclk rate: %u kHz\n", runtime->rawclk_freq); } #undef INTEL_VGA_DEVICE @@ -364,55 +375,55 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { drm_info(&dev_priv->drm, "Display fused off, disabling\n"); - info->display.pipe_mask = 0; - info->display.cpu_transcoder_mask = 0; - info->display.fbc_mask = 0; + runtime->pipe_mask = 0; + runtime->cpu_transcoder_mask = 0; + runtime->fbc_mask = 0; } else if (fuse_strap & IVB_PIPE_C_DISABLE) { drm_info(&dev_priv->drm, "PipeC fused off\n"); - info->display.pipe_mask &= ~BIT(PIPE_C); - info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_C); + runtime->pipe_mask &= ~BIT(PIPE_C); + runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); } } else if (HAS_DISPLAY(dev_priv) && DISPLAY_VER(dev_priv) >= 9) { u32 dfsm = intel_de_read(dev_priv, SKL_DFSM); if (dfsm & SKL_DFSM_PIPE_A_DISABLE) { - info->display.pipe_mask &= ~BIT(PIPE_A); - info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_A); - info->display.fbc_mask &= ~BIT(INTEL_FBC_A); + runtime->pipe_mask &= ~BIT(PIPE_A); + runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_A); + runtime->fbc_mask &= ~BIT(INTEL_FBC_A); } if (dfsm & SKL_DFSM_PIPE_B_DISABLE) { - info->display.pipe_mask &= ~BIT(PIPE_B); - info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_B); + runtime->pipe_mask &= ~BIT(PIPE_B); + runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_B); } if (dfsm & 
SKL_DFSM_PIPE_C_DISABLE) { - info->display.pipe_mask &= ~BIT(PIPE_C); - info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_C); + runtime->pipe_mask &= ~BIT(PIPE_C); + runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); } if (DISPLAY_VER(dev_priv) >= 12 && (dfsm & TGL_DFSM_PIPE_D_DISABLE)) { - info->display.pipe_mask &= ~BIT(PIPE_D); - info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_D); + runtime->pipe_mask &= ~BIT(PIPE_D); + runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_D); } if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE) - info->display.has_hdcp = 0; + runtime->has_hdcp = 0; if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE) - info->display.fbc_mask = 0; + runtime->fbc_mask = 0; if (DISPLAY_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE)) - info->display.has_dmc = 0; + runtime->has_dmc = 0; if (DISPLAY_VER(dev_priv) >= 10 && (dfsm & GLK_DFSM_DISPLAY_DSC_DISABLE)) - info->display.has_dsc = 0; + runtime->has_dsc = 0; } if (GRAPHICS_VER(dev_priv) == 6 && i915_vtd_active(dev_priv)) { drm_info(&dev_priv->drm, "Disabling ppGTT for VT-d support\n"); - info->ppgtt_type = INTEL_PPGTT_NONE; + runtime->ppgtt_type = INTEL_PPGTT_NONE; } runtime->rawclk_freq = intel_read_rawclk(dev_priv); diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index e681bc6ed8e9..d638235e1d26 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -37,6 +37,7 @@ struct drm_printer; struct drm_i915_private; +struct intel_gt_definition; /* Keep in gen based order, and chronological order within a gen */ enum intel_platform { @@ -164,7 +165,6 @@ enum intel_ppgtt_type { func(has_media_ratio_mode); \ func(has_mslice_steering); \ func(has_one_eu_per_fuse_bit); \ - func(has_pooled_eu); \ func(has_pxp); \ func(has_rc6); \ func(has_rc6p); \ @@ -180,14 +180,11 @@ enum intel_ppgtt_type { /* Keep in alphabetical order */ \ func(cursor_needs_physical); \ func(has_cdclk_crawl); \ - func(has_dmc); \ func(has_ddi); \ func(has_dp_mst); \ func(has_dsb); \ - func(has_dsc); \ func(has_fpga_dbg); \ func(has_gmch); \ - func(has_hdcp); \ func(has_hotplug); \ func(has_hti); \ func(has_ipc); \ @@ -203,23 +200,67 @@ struct ip_version { u8 rel; }; -struct intel_device_info { - struct ip_version graphics; - struct ip_version media; +struct intel_runtime_info { + struct { + struct ip_version ip; + } graphics; + struct { + struct ip_version ip; + } media; + struct { + struct ip_version ip; + } display; + + /* + * Platform mask is used for optimizing or-ed IS_PLATFORM calls into + * single runtime conditionals, and also to provide groundwork for + * future per platform, or per SKU build optimizations. + * + * Array can be extended when necessary if the corresponding + * BUILD_BUG_ON is hit. + */ + u32 platform_mask[2]; + + u16 device_id; intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */ - enum intel_platform platform; + u32 rawclk_freq; - unsigned int dma_mask_size; /* available DMA address bits */ + struct intel_step_info step; + + unsigned int page_sizes; /* page sizes supported by the HW */ enum intel_ppgtt_type ppgtt_type; unsigned int ppgtt_size; /* log2, e.g. 
31/32/48 bits */ - unsigned int page_sizes; /* page sizes supported by the HW */ - u32 memory_regions; /* regions supported by the HW */ + bool has_pooled_eu; + + /* display */ + struct { + u8 pipe_mask; + u8 cpu_transcoder_mask; + + u8 num_sprites[I915_MAX_PIPES]; + u8 num_scalers[I915_MAX_PIPES]; + + u8 fbc_mask; + + bool has_hdcp; + bool has_dmc; + bool has_dsc; + }; +}; + +struct intel_device_info { + enum intel_platform platform; + + unsigned int dma_mask_size; /* available DMA address bits */ + + const struct intel_gt_definition *extra_gt_list; + u8 gt; /* GT number, 0 if undefined */ #define DEFINE_FLAG(name) u8 name:1 @@ -227,12 +268,6 @@ struct intel_device_info { #undef DEFINE_FLAG struct { - u8 ver; - u8 rel; - - u8 pipe_mask; - u8 cpu_transcoder_mask; - u8 fbc_mask; u8 abox_mask; struct { @@ -259,27 +294,11 @@ struct intel_device_info { u32 gamma_lut_tests; } color; } display; -}; -struct intel_runtime_info { /* - * Platform mask is used for optimizing or-ed IS_PLATFORM calls into - * into single runtime conditionals, and also to provide groundwork - * for future per platform, or per SKU build optimizations. - * - * Array can be extended when necessary if the corresponding - * BUILD_BUG_ON is hit. + * Initial runtime info. Do not access outside of i915_driver_create(). */ - u32 platform_mask[2]; - - u16 device_id; - - u8 num_sprites[I915_MAX_PIPES]; - u8 num_scalers[I915_MAX_PIPES]; - - u32 rawclk_freq; - - struct intel_step_info step; + const struct intel_runtime_info __runtime; }; struct intel_driver_caps { @@ -292,10 +311,9 @@ const char *intel_platform_name(enum intel_platform platform); void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv); void intel_device_info_runtime_init(struct drm_i915_private *dev_priv); -void intel_device_info_print_static(const struct intel_device_info *info, - struct drm_printer *p); -void intel_device_info_print_runtime(const struct intel_runtime_info *info, - struct drm_printer *p); +void intel_device_info_print(const struct intel_device_info *info, + const struct intel_runtime_info *runtime, + struct drm_printer *p); void intel_driver_caps_print(const struct intel_driver_caps *caps, struct drm_printer *p); diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c index 437447119770..2403ccd52c74 100644 --- a/drivers/gpu/drm/i915/intel_dram.c +++ b/drivers/gpu/drm/i915/intel_dram.c @@ -466,6 +466,42 @@ static int gen12_get_dram_info(struct drm_i915_private *i915) return icl_pcode_read_mem_global_info(i915); } +static int xelpdp_get_dram_info(struct drm_i915_private *i915) +{ + u32 val = intel_uncore_read(&i915->uncore, MTL_MEM_SS_INFO_GLOBAL); + struct dram_info *dram_info = &i915->dram_info; + + switch (REG_FIELD_GET(MTL_DDR_TYPE_MASK, val)) { + case 0: + dram_info->type = INTEL_DRAM_DDR4; + break; + case 1: + dram_info->type = INTEL_DRAM_DDR5; + break; + case 2: + dram_info->type = INTEL_DRAM_LPDDR5; + break; + case 3: + dram_info->type = INTEL_DRAM_LPDDR4; + break; + case 4: + dram_info->type = INTEL_DRAM_DDR3; + break; + case 5: + dram_info->type = INTEL_DRAM_LPDDR3; + break; + default: + MISSING_CASE(val); + return -EINVAL; + } + + dram_info->num_channels = REG_FIELD_GET(MTL_N_OF_POPULATED_CH_MASK, val); + dram_info->num_qgv_points = REG_FIELD_GET(MTL_N_OF_ENABLED_QGV_POINTS_MASK, val); + /* PSF GV points not supported in D14+ */ + + return 0; +} +
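The decode above reads MTL_MEM_SS_INFO_GLOBAL once and then extracts each field from the raw value: the DDR-type switch takes REG_FIELD_GET() inline so that val still holds the full register when the channel and QGV-point counts are pulled from it afterwards. A self-contained sketch of the same decode (plain C; the mask values mirror the defines added to i915_reg.h above, the sample register value is invented):

#include <stdint.h>
#include <stdio.h>

/* Field layout per the MTL_MEM_SS_INFO_GLOBAL defines above. */
#define DDR_TYPE_MASK       0x0000000fu /* bits 3:0 */
#define POPULATED_CH_MASK   0x000000f0u /* bits 7:4 */
#define ENABLED_QGV_PT_MASK 0x00000f00u /* bits 11:8 */

static const char *mtl_ddr_type_name(uint32_t global)
{
	switch (global & DDR_TYPE_MASK) { /* same mapping as the switch above */
	case 0: return "DDR4";
	case 1: return "DDR5";
	case 2: return "LPDDR5";
	case 3: return "LPDDR4";
	case 4: return "DDR3";
	case 5: return "LPDDR3";
	default: return "unknown";
	}
}

int main(void)
{
	uint32_t global = 0x232; /* invented: 2 QGV points, 3 channels, LPDDR5 */

	printf("type: %s, channels: %u, qgv points: %u\n",
	       mtl_ddr_type_name(global),
	       (global & POPULATED_CH_MASK) >> 4,
	       (global & ENABLED_QGV_PT_MASK) >> 8);
	return 0;
}

 void intel_dram_detect(struct drm_i915_private *i915) { struct dram_info *dram_info = &i915->dram_info; @@ -480,7 +516,9 @@ void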
intel_dram_detect(struct drm_i915_private *i915) */ dram_info->wm_lv_0_adjust_needed = !IS_GEN9_LP(i915); - if (GRAPHICS_VER(i915) >= 12) + if (DISPLAY_VER(i915) >= 14) + ret = xelpdp_get_dram_info(i915); + else if (GRAPHICS_VER(i915) >= 12) ret = gen12_get_dram_info(i915); else if (GRAPHICS_VER(i915) >= 11) ret = gen11_get_dram_info(i915); diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c index 157e166672d7..e015bc91a26f 100644 --- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c +++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c @@ -4,6 +4,7 @@ */ #include "display/intel_audio_regs.h" +#include "display/intel_backlight_regs.h" #include "display/intel_dmc_regs.h" #include "display/vlv_dsi_pll_regs.h" #include "gt/intel_gt_regs.h" diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c index 0fec25be146a..ba9843cb1b13 100644 --- a/drivers/gpu/drm/i915/intel_pch.c +++ b/drivers/gpu/drm/i915/intel_pch.c @@ -138,6 +138,11 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) && !IS_ALDERLAKE_P(dev_priv)); return PCH_ADP; + case INTEL_PCH_MTP_DEVICE_ID_TYPE: + case INTEL_PCH_MTP2_DEVICE_ID_TYPE: + drm_dbg_kms(&dev_priv->drm, "Found Meteor Lake PCH\n"); + drm_WARN_ON(&dev_priv->drm, !IS_METEORLAKE(dev_priv)); + return PCH_MTP; default: return PCH_NONE; } @@ -166,7 +171,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv, * make an educated guess as to which PCH is really there. */ - if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) + if (IS_METEORLAKE(dev_priv)) + id = INTEL_PCH_MTP_DEVICE_ID_TYPE; + else if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) id = INTEL_PCH_ADP_DEVICE_ID_TYPE; else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv)) id = INTEL_PCH_TGP_DEVICE_ID_TYPE; diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h index 7c8ce9781d1a..32aff5a70d04 100644 --- a/drivers/gpu/drm/i915/intel_pch.h +++ b/drivers/gpu/drm/i915/intel_pch.h @@ -25,6 +25,7 @@ enum intel_pch { PCH_ICP, /* Ice Lake/Jasper Lake PCH */ PCH_TGP, /* Tiger Lake/Mule Creek Canyon PCH */ PCH_ADP, /* Alder Lake PCH */ + PCH_MTP, /* Meteor Lake PCH */ /* Fake PCHs, functionality handled on the same PCI dev */ PCH_DG1 = 1024, @@ -57,12 +58,15 @@ enum intel_pch { #define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180 #define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00 #define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480 +#define INTEL_PCH_MTP_DEVICE_ID_TYPE 0x7E00 +#define INTEL_PCH_MTP2_DEVICE_ID_TYPE 0xAE00 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type) #define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id) +#define HAS_PCH_MTP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MTP) #define HAS_PCH_DG2(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG2) #define HAS_PCH_ADP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ADP) #define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1) diff --git a/drivers/gpu/drm/i915/intel_pci_config.h b/drivers/gpu/drm/i915/intel_pci_config.h index 12cd9d4f23de..4977a524ce6f 100644 --- a/drivers/gpu/drm/i915/intel_pci_config.h +++ b/drivers/gpu/drm/i915/intel_pci_config.h @@ -6,6 +6,13 @@ #ifndef __INTEL_PCI_CONFIG_H__ #define __INTEL_PCI_CONFIG_H__ +/* PCI BARs */ +#define GTTMMADR_BAR 0 +#define GEN2_GTTMMADR_BAR 1 +#define GFXMEM_BAR 2 
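The intel_pci_config.h block above names the fixed PCI BAR indices used by i915, and the two aliases on the next line fold the legacy graphics aperture and the gen12 local-memory window onto the same BAR 2. A short sketch of where those indices land in PCI config space (offsets per the PCI spec; the helper and the 64-bit-BAR rationale are editorial, not from the patch):

#include <stdio.h>

#define GTTMMADR_BAR 0 /* registers + GTT */
#define GFXMEM_BAR   2 /* aperture, or LMEM on gen12 discrete parts */

/* PCI spec: BAR0 lives at config offset 0x10 and each BAR slot is 4
 * bytes; a 64-bit BAR consumes two consecutive slots, which is why the
 * data BAR follows the 64-bit MMIO/GTT BAR at index 2 rather than 1. */
static unsigned int bar_cfg_offset(unsigned int bar)
{
	return 0x10 + bar * 4;
}

int main(void)
{
	printf("GTTMMADR_BAR -> config offset 0x%x\n", bar_cfg_offset(GTTMMADR_BAR));
	printf("GFXMEM_BAR   -> config offset 0x%x\n", bar_cfg_offset(GFXMEM_BAR));
	return 0;
}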
+#define GTT_APERTURE_BAR GFXMEM_BAR +#define GEN12_LMEM_BAR GFXMEM_BAR + /* BSM in include/drm/i915_drm.h */ #define MCHBAR_I915 0x44 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ef7553b494ea..8f86f56e7ca4 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -25,60 +25,22 @@ * */ -#include <linux/module.h> -#include <linux/string_helpers.h> -#include <linux/pm_runtime.h> - -#include <drm/drm_atomic_helper.h> -#include <drm/drm_blend.h> -#include <drm/drm_fourcc.h> - -#include "display/intel_atomic.h" -#include "display/intel_atomic_plane.h" -#include "display/intel_bw.h" #include "display/intel_de.h" #include "display/intel_display_trace.h" -#include "display/intel_display_types.h" -#include "display/intel_fb.h" -#include "display/intel_fbc.h" -#include "display/intel_sprite.h" -#include "display/skl_universal_plane.h" +#include "display/skl_watermark.h" #include "gt/intel_engine_regs.h" #include "gt/intel_gt_regs.h" -#include "gt/intel_llc.h" #include "i915_drv.h" -#include "i915_fixed.h" -#include "i915_irq.h" #include "intel_mchbar_regs.h" -#include "intel_pcode.h" #include "intel_pm.h" #include "vlv_sideband.h" -#include "../../../platform/x86/intel_ips.h" - -static void skl_sagv_disable(struct drm_i915_private *dev_priv); struct drm_i915_clock_gating_funcs { void (*init_clock_gating)(struct drm_i915_private *i915); }; -/* Stores plane specific WM parameters */ -struct skl_wm_params { - bool x_tiled, y_tiled; - bool rc_surface; - bool is_planar; - u32 width; - u8 cpp; - u32 plane_pixel_rate; - u32 y_min_scanlines; - u32 plane_bytes_per_line; - uint_fixed_16_16_t plane_blocks_per_line; - uint_fixed_16_16_t y_tile_minimum; - u32 linetime_us; - u32 dbuf_block_size; -}; - /* used in computing the new watermarks state */ struct intel_wm_config { unsigned int num_pipes_active; @@ -468,13 +430,13 @@ bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) { bool ret; - mutex_lock(&dev_priv->wm.wm_mutex); + mutex_lock(&dev_priv->display.wm.wm_mutex); ret = _intel_set_memory_cxsr(dev_priv, enable); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - dev_priv->wm.vlv.cxsr = enable; + dev_priv->display.wm.vlv.cxsr = enable; else if (IS_G4X(dev_priv)) - dev_priv->wm.g4x.cxsr = enable; - mutex_unlock(&dev_priv->wm.wm_mutex); + dev_priv->display.wm.g4x.cxsr = enable; + mutex_unlock(&dev_priv->display.wm.wm_mutex); return ret; } @@ -834,11 +796,11 @@ static bool is_enabling(int old, int new, int threshold) static int intel_wm_num_levels(struct drm_i915_private *dev_priv) { - return dev_priv->wm.max_level + 1; + return dev_priv->display.wm.max_level + 1; } -static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); @@ -1093,11 +1055,11 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv, static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv) { /* all latencies in usec */ - dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5; - dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12; - dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35; + dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5; + dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12; + dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35; - dev_priv->wm.max_level = 
G4X_WM_LEVEL_HPLL; + dev_priv->display.wm.max_level = G4X_WM_LEVEL_HPLL; } static int g4x_plane_fifo_size(enum plane_id plane_id, int level) @@ -1150,7 +1112,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; - unsigned int latency = dev_priv->wm.pri_latency[level] * 10; + unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10; unsigned int pixel_rate, htotal, cpp, width, wm; if (latency == 0) @@ -1324,7 +1286,7 @@ static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - if (level > dev_priv->wm.max_level) + if (level > dev_priv->display.wm.max_level) return false; return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) && @@ -1583,7 +1545,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv, static void g4x_program_watermarks(struct drm_i915_private *dev_priv) { - struct g4x_wm_values *old_wm = &dev_priv->wm.g4x; + struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x; struct g4x_wm_values new_wm = {}; g4x_merge_wm(dev_priv, &new_wm); @@ -1609,10 +1571,10 @@ static void g4x_initial_watermarks(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - mutex_lock(&dev_priv->wm.wm_mutex); + mutex_lock(&dev_priv->display.wm.wm_mutex); crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate; g4x_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->wm.wm_mutex); + mutex_unlock(&dev_priv->display.wm.wm_mutex); } static void g4x_optimize_watermarks(struct intel_atomic_state *state, @@ -1625,10 +1587,10 @@ static void g4x_optimize_watermarks(struct intel_atomic_state *state, if (!crtc_state->wm.need_postvbl_update) return; - mutex_lock(&dev_priv->wm.wm_mutex); + mutex_lock(&dev_priv->display.wm.wm_mutex); crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; g4x_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->wm.wm_mutex); + mutex_unlock(&dev_priv->display.wm.wm_mutex); } /* latency must be in 0.1us units. 
*/ @@ -1650,15 +1612,15 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate, static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv) { /* all latencies in usec */ - dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3; + dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3; - dev_priv->wm.max_level = VLV_WM_LEVEL_PM2; + dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM2; if (IS_CHERRYVIEW(dev_priv)) { - dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12; - dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33; + dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12; + dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33; - dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS; + dev_priv->display.wm.max_level = VLV_WM_LEVEL_DDR_DVFS; } } @@ -1672,7 +1634,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, &crtc_state->hw.pipe_mode; unsigned int pixel_rate, htotal, cpp, width, wm; - if (dev_priv->wm.pri_latency[level] == 0) + if (dev_priv->display.wm.pri_latency[level] == 0) return USHRT_MAX; if (!intel_wm_plane_visible(crtc_state, plane_state)) @@ -1693,7 +1655,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, wm = 63; } else { wm = vlv_wm_method2(pixel_rate, htotal, width, cpp, - dev_priv->wm.pri_latency[level] * 10); + dev_priv->display.wm.pri_latency[level] * 10); } return min_t(unsigned int, wm, USHRT_MAX); @@ -2158,7 +2120,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv, struct intel_crtc *crtc; int num_active_pipes = 0; - wm->level = dev_priv->wm.max_level; + wm->level = dev_priv->display.wm.max_level; wm->cxsr = true; for_each_intel_crtc(&dev_priv->drm, crtc) { @@ -2197,7 +2159,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv, static void vlv_program_watermarks(struct drm_i915_private *dev_priv) { - struct vlv_wm_values *old_wm = &dev_priv->wm.vlv; + struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv; struct vlv_wm_values new_wm = {}; vlv_merge_wm(dev_priv, &new_wm); @@ -2235,10 +2197,10 @@ static void vlv_initial_watermarks(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - mutex_lock(&dev_priv->wm.wm_mutex); + mutex_lock(&dev_priv->display.wm.wm_mutex); crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate; vlv_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->wm.wm_mutex); + mutex_unlock(&dev_priv->display.wm.wm_mutex); } static void vlv_optimize_watermarks(struct intel_atomic_state *state, @@ -2251,10 +2213,10 @@ static void vlv_optimize_watermarks(struct intel_atomic_state *state, if (!crtc_state->wm.need_postvbl_update) return; - mutex_lock(&dev_priv->wm.wm_mutex); + mutex_lock(&dev_priv->display.wm.wm_mutex); crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; vlv_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->wm.wm_mutex); + mutex_unlock(&dev_priv->display.wm.wm_mutex); } static void i965_update_wm(struct drm_i915_private *dev_priv) @@ -2835,9 +2797,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, const struct intel_plane_state *curstate, struct intel_wm_level *result) { - u16 pri_latency = dev_priv->wm.pri_latency[level]; - u16 spr_latency = dev_priv->wm.spr_latency[level]; - u16 cur_latency = dev_priv->wm.cur_latency[level]; + u16 pri_latency = dev_priv->display.wm.pri_latency[level]; + u16 spr_latency = dev_priv->display.wm.spr_latency[level]; + u16 cur_latency = dev_priv->display.wm.cur_latency[level]; /* WM1+ latency values stored in 0.5us units 
*/ if (level > 0) { @@ -2861,119 +2823,43 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, result->enable = true; } -static void intel_read_wm_latency(struct drm_i915_private *dev_priv, - u16 wm[]) +static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) { - struct intel_uncore *uncore = &dev_priv->uncore; - - if (DISPLAY_VER(dev_priv) >= 9) { - u32 val; - int ret, i; - int level, max_level = ilk_wm_max_level(dev_priv); - int mult = IS_DG2(dev_priv) ? 2 : 1; - - /* read the first set of memory latencies[0:3] */ - val = 0; /* data0 to be programmed to 0 for first set */ - ret = snb_pcode_read(&dev_priv->uncore, GEN9_PCODE_READ_MEM_LATENCY, - &val, NULL); + u64 sskpd; - if (ret) { - drm_err(&dev_priv->drm, - "SKL Mailbox read error = %d\n", ret); - return; - } - - wm[0] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult; - wm[1] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & - GEN9_MEM_LATENCY_LEVEL_MASK) * mult; - wm[2] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & - GEN9_MEM_LATENCY_LEVEL_MASK) * mult; - wm[3] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & - GEN9_MEM_LATENCY_LEVEL_MASK) * mult; - - /* read the second set of memory latencies[4:7] */ - val = 1; /* data0 to be programmed to 1 for second set */ - ret = snb_pcode_read(&dev_priv->uncore, GEN9_PCODE_READ_MEM_LATENCY, - &val, NULL); - if (ret) { - drm_err(&dev_priv->drm, - "SKL Mailbox read error = %d\n", ret); - return; - } + sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD); - wm[4] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult; - wm[5] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & - GEN9_MEM_LATENCY_LEVEL_MASK) * mult; - wm[6] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & - GEN9_MEM_LATENCY_LEVEL_MASK) * mult; - wm[7] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & - GEN9_MEM_LATENCY_LEVEL_MASK) * mult; + wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd); + if (wm[0] == 0) + wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd); + wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd); + wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd); + wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd); + wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd); +} - /* - * If a level n (n > 1) has a 0us latency, all levels m (m >= n) - * need to be disabled. We make sure to sanitize the values out - * of the punit to satisfy this requirement. - */ - for (level = 1; level <= max_level; level++) { - if (wm[level] == 0) { - for (i = level + 1; i <= max_level; i++) - wm[i] = 0; +static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) +{ + u32 sskpd; - max_level = level - 1; + sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD); - break; - } - } + wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd); + wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd); + wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd); + wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd); +} - /* - * WaWmMemoryReadLatency - * - * punit doesn't take into account the read latency so we need - * to add proper adjustement to each valid level we retrieve - * from the punit when level 0 response data is 0us. - */ - if (wm[0] == 0) { - u8 adjust = DISPLAY_VER(dev_priv) >= 12 ? 3 : 2; +static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) +{ + u32 mltr; - for (level = 0; level <= max_level; level++) - wm[level] += adjust; - } + mltr = intel_uncore_read(&i915->uncore, MLTR_ILK); - /* - * WA Level-0 adjustment for 16GB DIMMs: SKL+ - * If we could not get dimm info enable this WA to prevent from - * any underrun. 
If not able to get Dimm info assume 16GB dimm - * to avoid any underrun. - */ - if (dev_priv->dram_info.wm_lv_0_adjust_needed) - wm[0] += 1; - } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD); - - wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd); - if (wm[0] == 0) - wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd); - wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd); - wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd); - wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd); - wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd); - } else if (DISPLAY_VER(dev_priv) >= 6) { - u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD); - - wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd); - wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd); - wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd); - wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd); - } else if (DISPLAY_VER(dev_priv) >= 5) { - u32 mltr = intel_uncore_read(uncore, MLTR_ILK); - - /* ILK primary LP0 latency is 700 ns */ - wm[0] = 7; - wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr); - wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr); - } else { - MISSING_CASE(INTEL_DEVID(dev_priv)); - } + /* ILK primary LP0 latency is 700 ns */ + wm[0] = 7; + wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr); + wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr); } static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv, @@ -3007,9 +2893,8 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv) return 2; } -static void intel_print_wm_latency(struct drm_i915_private *dev_priv, - const char *name, - const u16 wm[]) +void intel_print_wm_latency(struct drm_i915_private *dev_priv, + const char *name, const u16 wm[]) { int level, max_level = ilk_wm_max_level(dev_priv); @@ -3061,18 +2946,18 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) * The BIOS provided WM memory latency values are often * inadequate for high resolution displays. Adjust them. */ - changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12); - changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12); - changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); + changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12); + changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12); + changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12); if (!changed) return; drm_dbg_kms(&dev_priv->drm, "WM latency values increased to avoid potential underruns\n"); - intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); - intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); - intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); + intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); + intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); + intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); } static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv) @@ -3088,37 +2973,42 @@ static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv) * interrupts only. To play it safe we disable LP3 * watermarks entirely. 
*/ - if (dev_priv->wm.pri_latency[3] == 0 && - dev_priv->wm.spr_latency[3] == 0 && - dev_priv->wm.cur_latency[3] == 0) + if (dev_priv->display.wm.pri_latency[3] == 0 && + dev_priv->display.wm.spr_latency[3] == 0 && + dev_priv->display.wm.cur_latency[3] == 0) return; - dev_priv->wm.pri_latency[3] = 0; - dev_priv->wm.spr_latency[3] = 0; - dev_priv->wm.cur_latency[3] = 0; + dev_priv->display.wm.pri_latency[3] = 0; + dev_priv->display.wm.spr_latency[3] = 0; + dev_priv->display.wm.cur_latency[3] = 0; drm_dbg_kms(&dev_priv->drm, "LP3 watermarks disabled due to potential for lost interrupts\n"); - intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); - intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); - intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); + intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); + intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); + intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); } static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) { - intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency); + if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) + hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); + else if (DISPLAY_VER(dev_priv) >= 6) + snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); + else + ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); - memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency, - sizeof(dev_priv->wm.pri_latency)); - memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency, - sizeof(dev_priv->wm.pri_latency)); + memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency, + sizeof(dev_priv->display.wm.pri_latency)); + memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency, + sizeof(dev_priv->display.wm.pri_latency)); - intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency); - intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency); + intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency); + intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency); - intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); - intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); - intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); + intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); + intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); + intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); if (DISPLAY_VER(dev_priv) == 6) { snb_wm_latency_quirk(dev_priv); @@ -3126,12 +3016,6 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) } } -static void skl_setup_wm_latency(struct drm_i915_private *dev_priv) -{ - intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency); - intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency); -} - static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv, struct intel_pipe_wm *pipe_wm) { @@ -3386,7 +3270,7 @@ static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv, if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) return 2 * level; else - return dev_priv->wm.pri_latency[level]; + return dev_priv->display.wm.pri_latency[level]; } static void ilk_compute_wm_results(struct drm_i915_private *dev_priv, @@ -3538,7 +3422,7 @@ static unsigned int ilk_compute_wm_dirty(struct 
drm_i915_private *dev_priv, static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, unsigned int dirty) { - struct ilk_wm_values *previous = &dev_priv->wm.hw; + struct ilk_wm_values *previous = &dev_priv->display.wm.hw; bool changed = false; if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) { @@ -3572,7 +3456,7 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, static void ilk_write_wm_values(struct drm_i915_private *dev_priv, struct ilk_wm_values *results) { - struct ilk_wm_values *previous = &dev_priv->wm.hw; + struct ilk_wm_values *previous = &dev_priv->display.wm.hw; unsigned int dirty; u32 val; @@ -3634,7 +3518,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv, if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2]) intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]); - dev_priv->wm.hw = *results; + dev_priv->display.wm.hw = *results; } bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv) @@ -3642,2765 +3526,6 @@ bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv) return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); } -u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv) -{ - u8 enabled_slices = 0; - enum dbuf_slice slice; - - for_each_dbuf_slice(dev_priv, slice) { - if (intel_uncore_read(&dev_priv->uncore, - DBUF_CTL_S(slice)) & DBUF_POWER_STATE) - enabled_slices |= BIT(slice); - } - - return enabled_slices; -} - -/* - * FIXME: We still don't have the proper code detect if we need to apply the WA, - * so assume we'll always need it in order to avoid underruns. - */ -static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv) -{ - return DISPLAY_VER(dev_priv) == 9; -} - -static bool -intel_has_sagv(struct drm_i915_private *dev_priv) -{ - return DISPLAY_VER(dev_priv) >= 9 && !IS_LP(dev_priv) && - dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED; -} - -static u32 -intel_sagv_block_time(struct drm_i915_private *dev_priv) -{ - if (DISPLAY_VER(dev_priv) >= 12) { - u32 val = 0; - int ret; - - ret = snb_pcode_read(&dev_priv->uncore, - GEN12_PCODE_READ_SAGV_BLOCK_TIME_US, - &val, NULL); - if (ret) { - drm_dbg_kms(&dev_priv->drm, "Couldn't read SAGV block time!\n"); - return 0; - } - - return val; - } else if (DISPLAY_VER(dev_priv) == 11) { - return 10; - } else if (DISPLAY_VER(dev_priv) == 9 && !IS_LP(dev_priv)) { - return 30; - } else { - return 0; - } -} - -static void intel_sagv_init(struct drm_i915_private *i915) -{ - if (!intel_has_sagv(i915)) - i915->sagv_status = I915_SAGV_NOT_CONTROLLED; - - /* - * Probe to see if we have working SAGV control. - * For icl+ this was already determined by intel_bw_init_hw(). - */ - if (DISPLAY_VER(i915) < 11) - skl_sagv_disable(i915); - - drm_WARN_ON(&i915->drm, i915->sagv_status == I915_SAGV_UNKNOWN); - - i915->sagv_block_time_us = intel_sagv_block_time(i915); - - drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n", - str_yes_no(intel_has_sagv(i915)), i915->sagv_block_time_us); - - /* avoid overflow when adding with wm0 latency/etc. */ - if (drm_WARN(&i915->drm, i915->sagv_block_time_us > U16_MAX, - "Excessive SAGV block time %u, ignoring\n", - i915->sagv_block_time_us)) - i915->sagv_block_time_us = 0; - - if (!intel_has_sagv(i915)) - i915->sagv_block_time_us = 0; -} - -/* - * SAGV dynamically adjusts the system agent voltage and clock frequencies - * depending on power and performance requirements. The display engine access - * to system memory is blocked during the adjustment time. 
Because of the - * blocking time, having this enabled can cause full system hangs and/or pipe - * underruns if we don't meet all of the following requirements: - * - * - <= 1 pipe enabled - * - All planes can enable watermarks for latencies >= SAGV engine block time - * - We're not using an interlaced display configuration - */ -static void skl_sagv_enable(struct drm_i915_private *dev_priv) -{ - int ret; - - if (!intel_has_sagv(dev_priv)) - return; - - if (dev_priv->sagv_status == I915_SAGV_ENABLED) - return; - - drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n"); - ret = snb_pcode_write(&dev_priv->uncore, GEN9_PCODE_SAGV_CONTROL, - GEN9_SAGV_ENABLE); - - /* We don't need to wait for SAGV when enabling */ - - /* - * Some skl systems, pre-release machines in particular, - * don't actually have SAGV. - */ - if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) { - drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n"); - dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; - return; - } else if (ret < 0) { - drm_err(&dev_priv->drm, "Failed to enable SAGV\n"); - return; - } - - dev_priv->sagv_status = I915_SAGV_ENABLED; -} - -static void skl_sagv_disable(struct drm_i915_private *dev_priv) -{ - int ret; - - if (!intel_has_sagv(dev_priv)) - return; - - if (dev_priv->sagv_status == I915_SAGV_DISABLED) - return; - - drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n"); - /* bspec says to keep retrying for at least 1 ms */ - ret = skl_pcode_request(&dev_priv->uncore, GEN9_PCODE_SAGV_CONTROL, - GEN9_SAGV_DISABLE, - GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, - 1); - /* - * Some skl systems, pre-release machines in particular, - * don't actually have SAGV. - */ - if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) { - drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n"); - dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; - return; - } else if (ret < 0) { - drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret); - return; - } - - dev_priv->sagv_status = I915_SAGV_DISABLED; -} - -static void skl_sagv_pre_plane_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *i915 = to_i915(state->base.dev); - const struct intel_bw_state *new_bw_state = - intel_atomic_get_new_bw_state(state); - - if (!new_bw_state) - return; - - if (!intel_can_enable_sagv(i915, new_bw_state)) - skl_sagv_disable(i915); -} - -static void skl_sagv_post_plane_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *i915 = to_i915(state->base.dev); - const struct intel_bw_state *new_bw_state = - intel_atomic_get_new_bw_state(state); - - if (!new_bw_state) - return; - - if (intel_can_enable_sagv(i915, new_bw_state)) - skl_sagv_enable(i915); -} - -static void icl_sagv_pre_plane_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_bw_state *old_bw_state = - intel_atomic_get_old_bw_state(state); - const struct intel_bw_state *new_bw_state = - intel_atomic_get_new_bw_state(state); - u16 old_mask, new_mask; - - if (!new_bw_state) - return; - - old_mask = old_bw_state->qgv_points_mask; - new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask; - - if (old_mask == new_mask) - return; - - WARN_ON(!new_bw_state->base.changed); - - drm_dbg_kms(&dev_priv->drm, "Restricting QGV points: 0x%x -> 0x%x\n", - old_mask, new_mask); - - /* - * Restrict required qgv points before updating the configuration. - * According to BSpec we can't mask and unmask qgv points at the same - * time. 
Also masking should be done before updating the configuration - * and unmasking afterwards. - */ - icl_pcode_restrict_qgv_points(dev_priv, new_mask); -} - -static void icl_sagv_post_plane_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_bw_state *old_bw_state = - intel_atomic_get_old_bw_state(state); - const struct intel_bw_state *new_bw_state = - intel_atomic_get_new_bw_state(state); - u16 old_mask, new_mask; - - if (!new_bw_state) - return; - - old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask; - new_mask = new_bw_state->qgv_points_mask; - - if (old_mask == new_mask) - return; - - WARN_ON(!new_bw_state->base.changed); - - drm_dbg_kms(&dev_priv->drm, "Relaxing QGV points: 0x%x -> 0x%x\n", - old_mask, new_mask); - - /* - * Allow required qgv points after updating the configuration. - * According to BSpec we can't mask and unmask qgv points at the same - * time. Also masking should be done before updating the configuration - * and unmasking afterwards. - */ - icl_pcode_restrict_qgv_points(dev_priv, new_mask); -} - -void intel_sagv_pre_plane_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *i915 = to_i915(state->base.dev); - - /* - * Just return if we can't control SAGV or don't have it. - * This is different from situation when we have SAGV but just can't - * afford it due to DBuf limitation - in case if SAGV is completely - * disabled in a BIOS, we are not even allowed to send a PCode request, - * as it will throw an error. So have to check it here. - */ - if (!intel_has_sagv(i915)) - return; - - if (DISPLAY_VER(i915) >= 11) - icl_sagv_pre_plane_update(state); - else - skl_sagv_pre_plane_update(state); -} - -void intel_sagv_post_plane_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *i915 = to_i915(state->base.dev); - - /* - * Just return if we can't control SAGV or don't have it. - * This is different from situation when we have SAGV but just can't - * afford it due to DBuf limitation - in case if SAGV is completely - * disabled in a BIOS, we are not even allowed to send a PCode request, - * as it will throw an error. So have to check it here. - */ - if (!intel_has_sagv(i915)) - return; - - if (DISPLAY_VER(i915) >= 11) - icl_sagv_post_plane_update(state); - else - skl_sagv_post_plane_update(state); -} - -static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum plane_id plane_id; - int max_level = INT_MAX; - - if (!intel_has_sagv(dev_priv)) - return false; - - if (!crtc_state->hw.active) - return true; - - if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE) - return false; - - for_each_plane_id_on_crtc(crtc, plane_id) { - const struct skl_plane_wm *wm = - &crtc_state->wm.skl.optimal.planes[plane_id]; - int level; - - /* Skip this plane if it's not enabled */ - if (!wm->wm[0].enable) - continue; - - /* Find the highest enabled wm level for this plane */ - for (level = ilk_wm_max_level(dev_priv); - !wm->wm[level].enable; --level) - { } - - /* Highest common enabled wm level for all planes */ - max_level = min(level, max_level); - } - - /* No enabled planes? 
*/ - if (max_level == INT_MAX) - return true; - - for_each_plane_id_on_crtc(crtc, plane_id) { - const struct skl_plane_wm *wm = - &crtc_state->wm.skl.optimal.planes[plane_id]; - - /* - * All enabled planes must have enabled a common wm level that - * can tolerate memory latencies higher than sagv_block_time_us - */ - if (wm->wm[0].enable && !wm->wm[max_level].can_sagv) - return false; - } - - return true; -} - -static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - enum plane_id plane_id; - - if (!crtc_state->hw.active) - return true; - - for_each_plane_id_on_crtc(crtc, plane_id) { - const struct skl_plane_wm *wm = - &crtc_state->wm.skl.optimal.planes[plane_id]; - - if (wm->wm[0].enable && !wm->sagv.wm0.enable) - return false; - } - - return true; -} - -static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - - if (DISPLAY_VER(dev_priv) >= 12) - return tgl_crtc_can_enable_sagv(crtc_state); - else - return skl_crtc_can_enable_sagv(crtc_state); -} - -bool intel_can_enable_sagv(struct drm_i915_private *dev_priv, - const struct intel_bw_state *bw_state) -{ - if (DISPLAY_VER(dev_priv) < 11 && - bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes)) - return false; - - return bw_state->pipe_sagv_reject == 0; -} - -static int intel_compute_sagv_mask(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - int ret; - struct intel_crtc *crtc; - struct intel_crtc_state *new_crtc_state; - struct intel_bw_state *new_bw_state = NULL; - const struct intel_bw_state *old_bw_state = NULL; - int i; - - for_each_new_intel_crtc_in_state(state, crtc, - new_crtc_state, i) { - new_bw_state = intel_atomic_get_bw_state(state); - if (IS_ERR(new_bw_state)) - return PTR_ERR(new_bw_state); - - old_bw_state = intel_atomic_get_old_bw_state(state); - - if (intel_crtc_can_enable_sagv(new_crtc_state)) - new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe); - else - new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe); - } - - if (!new_bw_state) - return 0; - - new_bw_state->active_pipes = - intel_calc_active_pipes(state, old_bw_state->active_pipes); - - if (new_bw_state->active_pipes != old_bw_state->active_pipes) { - ret = intel_atomic_lock_global_state(&new_bw_state->base); - if (ret) - return ret; - } - - if (intel_can_enable_sagv(dev_priv, new_bw_state) != - intel_can_enable_sagv(dev_priv, old_bw_state)) { - ret = intel_atomic_serialize_global_state(&new_bw_state->base); - if (ret) - return ret; - } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { - ret = intel_atomic_lock_global_state(&new_bw_state->base); - if (ret) - return ret; - } - - for_each_new_intel_crtc_in_state(state, crtc, - new_crtc_state, i) { - struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal; - - /* - * We store use_sagv_wm in the crtc state rather than relying on - * that bw state since we have no convenient way to get at the - * latter from the plane commit hooks (especially in the legacy - * cursor case) - */ - pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(dev_priv) && - DISPLAY_VER(dev_priv) >= 12 && - intel_can_enable_sagv(dev_priv, new_bw_state); - } - - return 0; -} - -static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry, - u16 start, u16 end) -{ - entry->start = start; - entry->end = end; - - return end; 
-} - -static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv) -{ - return INTEL_INFO(dev_priv)->display.dbuf.size / - hweight8(INTEL_INFO(dev_priv)->display.dbuf.slice_mask); -} - -static void -skl_ddb_entry_for_slices(struct drm_i915_private *dev_priv, u8 slice_mask, - struct skl_ddb_entry *ddb) -{ - int slice_size = intel_dbuf_slice_size(dev_priv); - - if (!slice_mask) { - ddb->start = 0; - ddb->end = 0; - return; - } - - ddb->start = (ffs(slice_mask) - 1) * slice_size; - ddb->end = fls(slice_mask) * slice_size; - - WARN_ON(ddb->start >= ddb->end); - WARN_ON(ddb->end > INTEL_INFO(dev_priv)->display.dbuf.size); -} - -static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask) -{ - struct skl_ddb_entry ddb; - - if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2))) - slice_mask = BIT(DBUF_S1); - else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4))) - slice_mask = BIT(DBUF_S3); - - skl_ddb_entry_for_slices(i915, slice_mask, &ddb); - - return ddb.start; -} - -u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv, - const struct skl_ddb_entry *entry) -{ - int slice_size = intel_dbuf_slice_size(dev_priv); - enum dbuf_slice start_slice, end_slice; - u8 slice_mask = 0; - - if (!skl_ddb_entry_size(entry)) - return 0; - - start_slice = entry->start / slice_size; - end_slice = (entry->end - 1) / slice_size; - - /* - * Per plane DDB entry can in a really worst case be on multiple slices - * but single entry is anyway contigious. - */ - while (start_slice <= end_slice) { - slice_mask |= BIT(start_slice); - start_slice++; - } - - return slice_mask; -} - -static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state) -{ - const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; - int hdisplay, vdisplay; - - if (!crtc_state->hw.active) - return 0; - - /* - * Watermark/ddb requirement highly depends upon width of the - * framebuffer, So instead of allocating DDB equally among pipes - * distribute DDB based on resolution/width of the display. - */ - drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay); - - return hdisplay; -} - -static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state, - enum pipe for_pipe, - unsigned int *weight_start, - unsigned int *weight_end, - unsigned int *weight_total) -{ - struct drm_i915_private *dev_priv = - to_i915(dbuf_state->base.state->base.dev); - enum pipe pipe; - - *weight_start = 0; - *weight_end = 0; - *weight_total = 0; - - for_each_pipe(dev_priv, pipe) { - int weight = dbuf_state->weight[pipe]; - - /* - * Do not account pipes using other slice sets - * luckily as of current BSpec slice sets do not partially - * intersect(pipes share either same one slice or same slice set - * i.e no partial intersection), so it is enough to check for - * equality for now. 
- */ - if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe]) - continue; - - *weight_total += weight; - if (pipe < for_pipe) { - *weight_start += weight; - *weight_end += weight; - } else if (pipe == for_pipe) { - *weight_end += weight; - } - } -} - -static int -skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - unsigned int weight_total, weight_start, weight_end; - const struct intel_dbuf_state *old_dbuf_state = - intel_atomic_get_old_dbuf_state(state); - struct intel_dbuf_state *new_dbuf_state = - intel_atomic_get_new_dbuf_state(state); - struct intel_crtc_state *crtc_state; - struct skl_ddb_entry ddb_slices; - enum pipe pipe = crtc->pipe; - unsigned int mbus_offset = 0; - u32 ddb_range_size; - u32 dbuf_slice_mask; - u32 start, end; - int ret; - - if (new_dbuf_state->weight[pipe] == 0) { - skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0); - goto out; - } - - dbuf_slice_mask = new_dbuf_state->slices[pipe]; - - skl_ddb_entry_for_slices(dev_priv, dbuf_slice_mask, &ddb_slices); - mbus_offset = mbus_ddb_offset(dev_priv, dbuf_slice_mask); - ddb_range_size = skl_ddb_entry_size(&ddb_slices); - - intel_crtc_dbuf_weights(new_dbuf_state, pipe, - &weight_start, &weight_end, &weight_total); - - start = ddb_range_size * weight_start / weight_total; - end = ddb_range_size * weight_end / weight_total; - - skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], - ddb_slices.start - mbus_offset + start, - ddb_slices.start - mbus_offset + end); - -out: - if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] && - skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe], - &new_dbuf_state->ddb[pipe])) - return 0; - - ret = intel_atomic_lock_global_state(&new_dbuf_state->base); - if (ret) - return ret; - - crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); - if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); - - /* - * Used for checking overlaps, so we need absolute - * offsets instead of MBUS relative offsets. 
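(To make the weight-proportional split above concrete: a userspace rerun with made-up numbers - two pipes on one slice set, a 2048-block range, and hdisplay-derived weights of 1920 and 1280, mirroring intel_crtc_ddb_weight().)

#include <stdio.h>

int main(void)
{
	const unsigned int range = 2048;		/* blocks in the slice set */
	const unsigned int weight[] = { 1920, 1280 };	/* hdisplay per pipe */
	const unsigned int total = weight[0] + weight[1];
	unsigned int wstart = 0;

	for (int i = 0; i < 2; i++) {
		unsigned int wend = wstart + weight[i];

		/* same integer math as skl_crtc_allocate_ddb() above */
		printf("pipe %d: ddb [%u, %u)\n", i,
		       range * wstart / total, range * wend / total);
		wstart = wend;
	}
	return 0;
}

This prints [0, 1228) and [1228, 2048): because weight_end of one pipe equals weight_start of the next, adjacent entries share a boundary exactly and integer rounding cannot open a gap between them.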
- */ - crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start; - crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end; - - drm_dbg_kms(&dev_priv->drm, - "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n", - crtc->base.base.id, crtc->base.name, - old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe], - old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end, - new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end, - old_dbuf_state->active_pipes, new_dbuf_state->active_pipes); - - return 0; -} - -static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state, - int width, const struct drm_format_info *format, - u64 modifier, unsigned int rotation, - u32 plane_pixel_rate, struct skl_wm_params *wp, - int color_plane); - -static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, - struct intel_plane *plane, - int level, - unsigned int latency, - const struct skl_wm_params *wp, - const struct skl_wm_level *result_prev, - struct skl_wm_level *result /* out */); - -static unsigned int -skl_cursor_allocation(const struct intel_crtc_state *crtc_state, - int num_active) -{ - struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor); - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - int level, max_level = ilk_wm_max_level(dev_priv); - struct skl_wm_level wm = {}; - int ret, min_ddb_alloc = 0; - struct skl_wm_params wp; - - ret = skl_compute_wm_params(crtc_state, 256, - drm_format_info(DRM_FORMAT_ARGB8888), - DRM_FORMAT_MOD_LINEAR, - DRM_MODE_ROTATE_0, - crtc_state->pixel_rate, &wp, 0); - drm_WARN_ON(&dev_priv->drm, ret); - - for (level = 0; level <= max_level; level++) { - unsigned int latency = dev_priv->wm.skl_latency[level]; - - skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm); - if (wm.min_ddb_alloc == U16_MAX) - break; - - min_ddb_alloc = wm.min_ddb_alloc; - } - - return max(num_active == 1 ? 
32 : 8, min_ddb_alloc); -} - -static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg) -{ - skl_ddb_entry_init(entry, - REG_FIELD_GET(PLANE_BUF_START_MASK, reg), - REG_FIELD_GET(PLANE_BUF_END_MASK, reg)); - if (entry->end) - entry->end++; -} - -static void -skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv, - const enum pipe pipe, - const enum plane_id plane_id, - struct skl_ddb_entry *ddb, - struct skl_ddb_entry *ddb_y) -{ - u32 val; - - /* Cursor doesn't support NV12/planar, so no extra calculation needed */ - if (plane_id == PLANE_CURSOR) { - val = intel_uncore_read(&dev_priv->uncore, CUR_BUF_CFG(pipe)); - skl_ddb_entry_init_from_hw(ddb, val); - return; - } - - val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id)); - skl_ddb_entry_init_from_hw(ddb, val); - - if (DISPLAY_VER(dev_priv) >= 11) - return; - - val = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id)); - skl_ddb_entry_init_from_hw(ddb_y, val); -} - -static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, - struct skl_ddb_entry *ddb, - struct skl_ddb_entry *ddb_y) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum intel_display_power_domain power_domain; - enum pipe pipe = crtc->pipe; - intel_wakeref_t wakeref; - enum plane_id plane_id; - - power_domain = POWER_DOMAIN_PIPE(pipe); - wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); - if (!wakeref) - return; - - for_each_plane_id_on_crtc(crtc, plane_id) - skl_ddb_get_hw_plane_state(dev_priv, pipe, - plane_id, - &ddb[plane_id], - &ddb_y[plane_id]); - - intel_display_power_put(dev_priv, power_domain, wakeref); -} - -struct dbuf_slice_conf_entry { - u8 active_pipes; - u8 dbuf_mask[I915_MAX_PIPES]; - bool join_mbus; -}; - -/* - * Table taken from Bspec 12716 - * Pipes do have some preferred DBuf slice affinity, - * plus there are some hardcoded requirements on how - * those should be distributed for multipipe scenarios. - * For more DBuf slices algorithm can get even more messy - * and less readable, so decided to use a table almost - * as is from BSpec itself - that way it is at least easier - * to compare, change and check. - */ -static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] = -/* Autogenerated with igt/tools/intel_dbuf_map tool: */ -{ - { - .active_pipes = BIT(PIPE_A), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - }, - }, - { - .active_pipes = BIT(PIPE_B), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_B] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_C), - .dbuf_mask = { - [PIPE_C] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_C] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1), - [PIPE_C] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_B] = BIT(DBUF_S1), - [PIPE_C] = BIT(DBUF_S2), - }, - }, - {} -}; - -/* - * Table taken from Bspec 49255 - * Pipes do have some preferred DBuf slice affinity, - * plus there are some hardcoded requirements on how - * those should be distributed for multipipe scenarios. 
- * For more DBuf slices algorithm can get even more messy - * and less readable, so decided to use a table almost - * as is from BSpec itself - that way it is at least easier - * to compare, change and check. - */ -static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] = -/* Autogenerated with igt/tools/intel_dbuf_map tool: */ -{ - { - .active_pipes = BIT(PIPE_A), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_B), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S2), - [PIPE_B] = BIT(DBUF_S1), - }, - }, - { - .active_pipes = BIT(PIPE_C), - .dbuf_mask = { - [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_C] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1), - [PIPE_C] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_B] = BIT(DBUF_S1), - [PIPE_C] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_D), - .dbuf_mask = { - [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_D] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_B) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1), - [PIPE_D] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_B] = BIT(DBUF_S1), - [PIPE_D] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_C] = BIT(DBUF_S1), - [PIPE_D] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_C] = BIT(DBUF_S2), - [PIPE_D] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1), - [PIPE_C] = BIT(DBUF_S2), - [PIPE_D] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_B] = BIT(DBUF_S1), - [PIPE_C] = BIT(DBUF_S2), - [PIPE_D] = BIT(DBUF_S2), - }, - }, - {} -}; - -static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = { - { - .active_pipes = BIT(PIPE_A), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_B), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_B] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_C), - .dbuf_mask = { - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_B] = BIT(DBUF_S2), - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_D), - .dbuf_mask = { - [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4), - 
}, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_B) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_B] = BIT(DBUF_S2), - [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_C] = BIT(DBUF_S3), - [PIPE_D] = BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_C] = BIT(DBUF_S3), - [PIPE_D] = BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_C] = BIT(DBUF_S3), - [PIPE_D] = BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_B] = BIT(DBUF_S2), - [PIPE_C] = BIT(DBUF_S3), - [PIPE_D] = BIT(DBUF_S4), - }, - }, - {} -}; - -static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = { - /* - * Keep the join_mbus cases first so check_mbus_joined() - * will prefer them over the !join_mbus cases. - */ - { - .active_pipes = BIT(PIPE_A), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4), - }, - .join_mbus = true, - }, - { - .active_pipes = BIT(PIPE_B), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4), - }, - .join_mbus = true, - }, - { - .active_pipes = BIT(PIPE_A), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - .join_mbus = false, - }, - { - .active_pipes = BIT(PIPE_B), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - .join_mbus = false, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_C), - .dbuf_mask = { - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_D), - .dbuf_mask = { - [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_B) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), - [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), - [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - [PIPE_D] = 
BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - {} - -}; - -static bool check_mbus_joined(u8 active_pipes, - const struct dbuf_slice_conf_entry *dbuf_slices) -{ - int i; - - for (i = 0; dbuf_slices[i].active_pipes != 0; i++) { - if (dbuf_slices[i].active_pipes == active_pipes) - return dbuf_slices[i].join_mbus; - } - return false; -} - -static bool adlp_check_mbus_joined(u8 active_pipes) -{ - return check_mbus_joined(active_pipes, adlp_allowed_dbufs); -} - -static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus, - const struct dbuf_slice_conf_entry *dbuf_slices) -{ - int i; - - for (i = 0; dbuf_slices[i].active_pipes != 0; i++) { - if (dbuf_slices[i].active_pipes == active_pipes && - dbuf_slices[i].join_mbus == join_mbus) - return dbuf_slices[i].dbuf_mask[pipe]; - } - return 0; -} - -/* - * This function finds an entry with the same enabled pipe configuration and - * returns the corresponding DBuf slice mask as stated in BSpec for the - * particular platform. - */ -static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) -{ - /* - * FIXME: For ICL this is still a bit unclear as the prev BSpec revision - * required calculating "pipe ratio" in order to determine - * if one or two slices can be used for single pipe configurations - * as an additional constraint to the existing table. - * However based on recent info, it should not be "pipe ratio" - * but rather the ratio between pixel_rate and cdclk with additional - * constants, so for now we are using only the table until this is - * clarified. Also this is the reason why the crtc_state param is - * still here - we will need it once those additional constraints - * pop up.
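(The lookup itself, reduced to a runnable mock with just the PIPE_A | PIPE_B row of tgl_allowed_dbufs from above; the numeric pipe/slice encodings are mine, and join_mbus is dropped here even though the real compute_dbuf_slices() matches on it too.)

#include <stdio.h>

/* Pipes A..D -> indexes 0..3; DBUF_S1/S2 -> bits 0/1 */
struct entry { unsigned char active_pipes; unsigned char dbuf_mask[4]; };

static const struct entry tgl_ab_row[] = {
	{ .active_pipes = 0x3, .dbuf_mask = { [0] = 0x2, [1] = 0x1 } },
	{}	/* sentinel, as in the kernel tables */
};

int main(void)
{
	unsigned char active = 0x3;	/* BIT(PIPE_A) | BIT(PIPE_B) */

	for (int i = 0; tgl_ab_row[i].active_pipes; i++)
		if (tgl_ab_row[i].active_pipes == active)
			printf("pipe A -> 0x%x, pipe B -> 0x%x\n",
			       tgl_ab_row[i].dbuf_mask[0],
			       tgl_ab_row[i].dbuf_mask[1]);
	return 0;
}

Matching on join_mbus as well is what lets the ADL-P table carry both a joined and an unjoined row for the same active_pipes value.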
- */ - return compute_dbuf_slices(pipe, active_pipes, join_mbus, - icl_allowed_dbufs); -} - -static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) -{ - return compute_dbuf_slices(pipe, active_pipes, join_mbus, - tgl_allowed_dbufs); -} - -static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) -{ - return compute_dbuf_slices(pipe, active_pipes, join_mbus, - adlp_allowed_dbufs); -} - -static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) -{ - return compute_dbuf_slices(pipe, active_pipes, join_mbus, - dg2_allowed_dbufs); -} - -static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - - if (IS_DG2(dev_priv)) - return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus); - else if (IS_ALDERLAKE_P(dev_priv)) - return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus); - else if (DISPLAY_VER(dev_priv) == 12) - return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus); - else if (DISPLAY_VER(dev_priv) == 11) - return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus); - /* - * For anything else just return one slice for now. - * Should be extended for other platforms. - */ - return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0; -} - -static bool -use_minimal_wm0_only(const struct intel_crtc_state *crtc_state, - struct intel_plane *plane) -{ - struct drm_i915_private *i915 = to_i915(plane->base.dev); - - return DISPLAY_VER(i915) >= 13 && - crtc_state->uapi.async_flip && - plane->async_flip; -} - -static u64 -skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - enum plane_id plane_id; - u64 data_rate = 0; - - for_each_plane_id_on_crtc(crtc, plane_id) { - if (plane_id == PLANE_CURSOR) - continue; - - data_rate += crtc_state->rel_data_rate[plane_id]; - - if (DISPLAY_VER(i915) < 11) - data_rate += crtc_state->rel_data_rate_y[plane_id]; - } - - return data_rate; -} - -static const struct skl_wm_level * -skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm, - enum plane_id plane_id, - int level) -{ - const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; - - if (level == 0 && pipe_wm->use_sagv_wm) - return &wm->sagv.wm0; - - return &wm->wm[level]; -} - -static const struct skl_wm_level * -skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm, - enum plane_id plane_id) -{ - const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; - - if (pipe_wm->use_sagv_wm) - return &wm->sagv.trans_wm; - - return &wm->trans_wm; -} - -/* - * We only disable the watermarks for each plane if - * they exceed the ddb allocation of said plane. This - * is done so that we don't end up touching cursor - * watermarks needlessly when some other plane reduces - * our max possible watermark level. - * - * Bspec has this to say about the PLANE_WM enable bit: - * "All the watermarks at this level for all enabled - * planes must be enabled before the level will be used." - * So this is actually safe to do.
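(The check that implements this, skl_check_wm_level() just below, is tiny; in userspace terms with made-up numbers:)

#include <stdio.h>
#include <string.h>

struct wm_level { int enable, blocks, min_ddb_alloc; };

int main(void)
{
	struct wm_level lvl = { .enable = 1, .blocks = 38, .min_ddb_alloc = 40 };
	int ddb_size = 32;	/* skl_ddb_entry_size() of this plane's entry */

	if (lvl.min_ddb_alloc > ddb_size)	/* 40 > 32 */
		memset(&lvl, 0, sizeof(lvl));	/* level dropped entirely */
	printf("enable=%d blocks=%d\n", lvl.enable, lvl.blocks);
	return 0;
}

Zeroing the whole level clears its enable bit, so per the Bspec quote above the hardware will simply never pick that level.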
- */ -static void -skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb) -{ - if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) - memset(wm, 0, sizeof(*wm)); -} - -static void -skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm, - const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb) -{ - if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) || - uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) { - memset(wm, 0, sizeof(*wm)); - memset(uv_wm, 0, sizeof(*uv_wm)); - } -} - -static bool icl_need_wm1_wa(struct drm_i915_private *i915, - enum plane_id plane_id) -{ - /* - * Wa_1408961008:icl, ehl - * Wa_14012656716:tgl, adl - * Underruns with WM1+ disabled - */ - return DISPLAY_VER(i915) == 11 || - (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR); -} - -struct skl_plane_ddb_iter { - u64 data_rate; - u16 start, size; -}; - -static void -skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter, - struct skl_ddb_entry *ddb, - const struct skl_wm_level *wm, - u64 data_rate) -{ - u16 size, extra = 0; - - if (data_rate) { - extra = min_t(u16, iter->size, - DIV64_U64_ROUND_UP(iter->size * data_rate, - iter->data_rate)); - iter->size -= extra; - iter->data_rate -= data_rate; - } - - /* - * Keep ddb entry of all disabled planes explicitly zeroed - * to avoid skl_ddb_add_affected_planes() adding them to - * the state when other planes change their allocations. - */ - size = wm->min_ddb_alloc + extra; - if (size) - iter->start = skl_ddb_entry_init(ddb, iter->start, - iter->start + size); -} - -static int -skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - const struct intel_dbuf_state *dbuf_state = - intel_atomic_get_new_dbuf_state(state); - const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe]; - int num_active = hweight8(dbuf_state->active_pipes); - struct skl_plane_ddb_iter iter; - enum plane_id plane_id; - u16 cursor_size; - u32 blocks; - int level; - - /* Clear the partitioning for disabled planes. */ - memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb)); - memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y)); - - if (!crtc_state->hw.active) - return 0; - - iter.start = alloc->start; - iter.size = skl_ddb_entry_size(alloc); - if (iter.size == 0) - return 0; - - /* Allocate fixed number of blocks for cursor. */ - cursor_size = skl_cursor_allocation(crtc_state, num_active); - iter.size -= cursor_size; - skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR], - alloc->end - cursor_size, alloc->end); - - iter.data_rate = skl_total_relative_data_rate(crtc_state); - - /* - * Find the highest watermark level for which we can satisfy the block - * requirement of active planes. 
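(In miniature, the search that follows does this - userspace, made-up per-level block sums:)

#include <stdio.h>

int main(void)
{
	/* summed min_ddb_alloc of all planes, per level 0..7 (made up) */
	const unsigned int need[8] = { 120, 180, 260, 340, 420, 480, 560, 700 };
	unsigned int size = 500;	/* this pipe's DDB allocation */
	int level;

	for (level = 7; level >= 0; level--)
		if (need[level] <= size)
			break;
	if (level < 0)
		printf("nothing fits -> -EINVAL\n");
	else	/* here: level 5, 500 - 480 = 20 spare blocks */
		printf("level %d, %u spare\n", level, size - need[level]);
	return 0;
}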
- */ - for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) { - blocks = 0; - for_each_plane_id_on_crtc(crtc, plane_id) { - const struct skl_plane_wm *wm = - &crtc_state->wm.skl.optimal.planes[plane_id]; - - if (plane_id == PLANE_CURSOR) { - const struct skl_ddb_entry *ddb = - &crtc_state->wm.skl.plane_ddb[plane_id]; - - if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) { - drm_WARN_ON(&dev_priv->drm, - wm->wm[level].min_ddb_alloc != U16_MAX); - blocks = U32_MAX; - break; - } - continue; - } - - blocks += wm->wm[level].min_ddb_alloc; - blocks += wm->uv_wm[level].min_ddb_alloc; - } - - if (blocks <= iter.size) { - iter.size -= blocks; - break; - } - } - - if (level < 0) { - drm_dbg_kms(&dev_priv->drm, - "Requested display configuration exceeds system DDB limitations"); - drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n", - blocks, iter.size); - return -EINVAL; - } - - /* avoid the WARN later when we don't allocate any extra DDB */ - if (iter.data_rate == 0) - iter.size = 0; - - /* - * Grant each plane the blocks it requires at the highest achievable - * watermark level, plus an extra share of the leftover blocks - * proportional to its relative data rate. - */ - for_each_plane_id_on_crtc(crtc, plane_id) { - struct skl_ddb_entry *ddb = - &crtc_state->wm.skl.plane_ddb[plane_id]; - struct skl_ddb_entry *ddb_y = - &crtc_state->wm.skl.plane_ddb_y[plane_id]; - const struct skl_plane_wm *wm = - &crtc_state->wm.skl.optimal.planes[plane_id]; - - if (plane_id == PLANE_CURSOR) - continue; - - if (DISPLAY_VER(dev_priv) < 11 && - crtc_state->nv12_planes & BIT(plane_id)) { - skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level], - crtc_state->rel_data_rate_y[plane_id]); - skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level], - crtc_state->rel_data_rate[plane_id]); - } else { - skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level], - crtc_state->rel_data_rate[plane_id]); - } - } - drm_WARN_ON(&dev_priv->drm, iter.size != 0 || iter.data_rate != 0); - - /* - * When we calculated watermark values we didn't know how high - * of a level we'd actually be able to hit, so we just marked - * all levels as "enabled." Go back now and disable the ones - * that aren't actually possible. - */ - for (level++; level <= ilk_wm_max_level(dev_priv); level++) { - for_each_plane_id_on_crtc(crtc, plane_id) { - const struct skl_ddb_entry *ddb = - &crtc_state->wm.skl.plane_ddb[plane_id]; - const struct skl_ddb_entry *ddb_y = - &crtc_state->wm.skl.plane_ddb_y[plane_id]; - struct skl_plane_wm *wm = - &crtc_state->wm.skl.optimal.planes[plane_id]; - - if (DISPLAY_VER(dev_priv) < 11 && - crtc_state->nv12_planes & BIT(plane_id)) - skl_check_nv12_wm_level(&wm->wm[level], - &wm->uv_wm[level], - ddb_y, ddb); - else - skl_check_wm_level(&wm->wm[level], ddb); - - if (icl_need_wm1_wa(dev_priv, plane_id) && - level == 1 && wm->wm[0].enable) { - wm->wm[level].blocks = wm->wm[0].blocks; - wm->wm[level].lines = wm->wm[0].lines; - wm->wm[level].ignore_lines = wm->wm[0].ignore_lines; - } - } - } - - /* - * Go back and disable the transition and SAGV watermarks - * if it turns out we don't have enough DDB blocks for them. 
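(The leftover distribution performed by skl_allocate_plane_ddb() in the loop above, rerun standalone: 400 spare blocks split across two planes with a 3:1 relative data rate; numbers are made up.)

#include <stdio.h>

int main(void)
{
	unsigned long long size = 400;		/* spare blocks */
	unsigned long long data_rate = 300 + 100;
	const unsigned int rate[] = { 300, 100 };

	for (int i = 0; i < 2; i++) {
		/* DIV64_U64_ROUND_UP(size * rate, data_rate), capped at size */
		unsigned long long extra =
			(size * rate[i] + data_rate - 1) / data_rate;

		if (extra > size)
			extra = size;
		printf("plane %d: +%llu extra blocks\n", i, extra);
		size -= extra;
		data_rate -= rate[i];
	}
	return 0;
}

Decrementing size and data_rate as it goes makes the rounding self-correcting: the last plane always receives exactly what is left.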
- */ - for_each_plane_id_on_crtc(crtc, plane_id) { - const struct skl_ddb_entry *ddb = - &crtc_state->wm.skl.plane_ddb[plane_id]; - const struct skl_ddb_entry *ddb_y = - &crtc_state->wm.skl.plane_ddb_y[plane_id]; - struct skl_plane_wm *wm = - &crtc_state->wm.skl.optimal.planes[plane_id]; - - if (DISPLAY_VER(dev_priv) < 11 && - crtc_state->nv12_planes & BIT(plane_id)) { - skl_check_wm_level(&wm->trans_wm, ddb_y); - } else { - WARN_ON(skl_ddb_entry_size(ddb_y)); - - skl_check_wm_level(&wm->trans_wm, ddb); - } - - skl_check_wm_level(&wm->sagv.wm0, ddb); - skl_check_wm_level(&wm->sagv.trans_wm, ddb); - } - - return 0; -} - -/* - * The max latency should be 257 (max the punit can code is 255 and we add 2us - * for the read latency) and cpp should always be <= 8, so that - * should allow pixel_rate up to ~2 GHz which seems sufficient since max - * 2xcdclk is 1350 MHz and the pixel rate should never exceed that. -*/ -static uint_fixed_16_16_t -skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate, - u8 cpp, u32 latency, u32 dbuf_block_size) -{ - u32 wm_intermediate_val; - uint_fixed_16_16_t ret; - - if (latency == 0) - return FP_16_16_MAX; - - wm_intermediate_val = latency * pixel_rate * cpp; - ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size); - - if (DISPLAY_VER(dev_priv) >= 10) - ret = add_fixed16_u32(ret, 1); - - return ret; -} - -static uint_fixed_16_16_t -skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency, - uint_fixed_16_16_t plane_blocks_per_line) -{ - u32 wm_intermediate_val; - uint_fixed_16_16_t ret; - - if (latency == 0) - return FP_16_16_MAX; - - wm_intermediate_val = latency * pixel_rate; - wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val, - pipe_htotal * 1000); - ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line); - return ret; -} - -static uint_fixed_16_16_t -intel_get_linetime_us(const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - u32 pixel_rate; - u32 crtc_htotal; - uint_fixed_16_16_t linetime_us; - - if (!crtc_state->hw.active) - return u32_to_fixed16(0); - - pixel_rate = crtc_state->pixel_rate; - - if (drm_WARN_ON(&dev_priv->drm, pixel_rate == 0)) - return u32_to_fixed16(0); - - crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal; - linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate); - - return linetime_us; -} - -static int -skl_compute_wm_params(const struct intel_crtc_state *crtc_state, - int width, const struct drm_format_info *format, - u64 modifier, unsigned int rotation, - u32 plane_pixel_rate, struct skl_wm_params *wp, - int color_plane) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 interm_pbpl; - - /* only planar format has two planes */ - if (color_plane == 1 && - !intel_format_info_is_yuv_semiplanar(format, modifier)) { - drm_dbg_kms(&dev_priv->drm, - "Non planar format have single plane\n"); - return -EINVAL; - } - - wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED || - modifier == I915_FORMAT_MOD_4_TILED || - modifier == I915_FORMAT_MOD_Yf_TILED || - modifier == I915_FORMAT_MOD_Y_TILED_CCS || - modifier == I915_FORMAT_MOD_Yf_TILED_CCS; - wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED; - wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS || - modifier == I915_FORMAT_MOD_Yf_TILED_CCS; - wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier); - - wp->width = width; - if (color_plane == 1 && wp->is_planar) - wp->width /= 
2; - - wp->cpp = format->cpp[color_plane]; - wp->plane_pixel_rate = plane_pixel_rate; - - if (DISPLAY_VER(dev_priv) >= 11 && - modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1) - wp->dbuf_block_size = 256; - else - wp->dbuf_block_size = 512; - - if (drm_rotation_90_or_270(rotation)) { - switch (wp->cpp) { - case 1: - wp->y_min_scanlines = 16; - break; - case 2: - wp->y_min_scanlines = 8; - break; - case 4: - wp->y_min_scanlines = 4; - break; - default: - MISSING_CASE(wp->cpp); - return -EINVAL; - } - } else { - wp->y_min_scanlines = 4; - } - - if (skl_needs_memory_bw_wa(dev_priv)) - wp->y_min_scanlines *= 2; - - wp->plane_bytes_per_line = wp->width * wp->cpp; - if (wp->y_tiled) { - interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line * - wp->y_min_scanlines, - wp->dbuf_block_size); - - if (DISPLAY_VER(dev_priv) >= 10) - interm_pbpl++; - - wp->plane_blocks_per_line = div_fixed16(interm_pbpl, - wp->y_min_scanlines); - } else { - interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, - wp->dbuf_block_size); - - if (!wp->x_tiled || DISPLAY_VER(dev_priv) >= 10) - interm_pbpl++; - - wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl); - } - - wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines, - wp->plane_blocks_per_line); - - wp->linetime_us = fixed16_to_u32_round_up( - intel_get_linetime_us(crtc_state)); - - return 0; -} - -static int -skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - struct skl_wm_params *wp, int color_plane) -{ - const struct drm_framebuffer *fb = plane_state->hw.fb; - int width; - - /* - * Src coordinates are already rotated by 270 degrees for - * the 90/270 degree plane rotation cases (to match the - * GTT mapping), hence no need to account for rotation here. - */ - width = drm_rect_width(&plane_state->uapi.src) >> 16; - - return skl_compute_wm_params(crtc_state, width, - fb->format, fb->modifier, - plane_state->hw.rotation, - intel_plane_pixel_rate(crtc_state, plane_state), - wp, color_plane); -} - -static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level) -{ - if (DISPLAY_VER(dev_priv) >= 10) - return true; - - /* The number of lines is ignored for the level 0 watermark.
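(Worked example of the plane_blocks_per_line math above, as a standalone program - hypothetical 3840 px wide ARGB8888 plane, not Y-tiled, 512-byte blocks, display version >= 10 so the +1 applies:)

#include <stdio.h>

int main(void)
{
	const unsigned int width = 3840, cpp = 4;	/* ARGB8888 */
	const unsigned int block = 512;			/* dbuf_block_size */
	unsigned int bytes_per_line = width * cpp;	/* 15360 */
	/* DIV_ROUND_UP(15360, 512) = 30, then the display ver >= 10 "+1" */
	unsigned int blocks_per_line = (bytes_per_line + block - 1) / block + 1;

	printf("plane_blocks_per_line = %u\n", blocks_per_line);	/* 31 */
	return 0;
}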
*/ - return level > 0; -} - -static int skl_wm_max_lines(struct drm_i915_private *dev_priv) -{ - if (DISPLAY_VER(dev_priv) >= 13) - return 255; - else - return 31; -} - -static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, - struct intel_plane *plane, - int level, - unsigned int latency, - const struct skl_wm_params *wp, - const struct skl_wm_level *result_prev, - struct skl_wm_level *result /* out */) -{ - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - uint_fixed_16_16_t method1, method2; - uint_fixed_16_16_t selected_result; - u32 blocks, lines, min_ddb_alloc = 0; - - if (latency == 0 || - (use_minimal_wm0_only(crtc_state, plane) && level > 0)) { - /* reject it */ - result->min_ddb_alloc = U16_MAX; - return; - } - - /* - * WaIncreaseLatencyIPCEnabled: kbl,cfl - * Display WA #1141: kbl,cfl - */ - if ((IS_KABYLAKE(dev_priv) || - IS_COFFEELAKE(dev_priv) || - IS_COMETLAKE(dev_priv)) && - dev_priv->ipc_enabled) - latency += 4; - - if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled) - latency += 15; - - method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate, - wp->cpp, latency, wp->dbuf_block_size); - method2 = skl_wm_method2(wp->plane_pixel_rate, - crtc_state->hw.pipe_mode.crtc_htotal, - latency, - wp->plane_blocks_per_line); - - if (wp->y_tiled) { - selected_result = max_fixed16(method2, wp->y_tile_minimum); - } else { - if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal / - wp->dbuf_block_size < 1) && - (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) { - selected_result = method2; - } else if (latency >= wp->linetime_us) { - if (DISPLAY_VER(dev_priv) == 9) - selected_result = min_fixed16(method1, method2); - else - selected_result = method2; - } else { - selected_result = method1; - } - } - - blocks = fixed16_to_u32_round_up(selected_result) + 1; - /* - * Let's have blocks at minimum equivalent to plane_blocks_per_line - * as there will be at minimum one line for lines configuration. This - * is a workaround for FIFO underruns observed with resolutions like - * 4k 60 Hz in single channel DRAM configurations. - * - * As per the Bspec 49325, if the ddb allocation can hold at least - * one plane_blocks_per_line, we should have selected method2 in - * the above logic. Assuming that modern versions have enough dbuf - * and method2 guarantees blocks equivalent to at least 1 line, - * select the blocks as plane_blocks_per_line. - * - * TODO: Revisit the logic when we have a better understanding of DRAM - * channels' impact on the level 0 memory latency and the relevant - * wm calculations. - */ - if (skl_wm_has_lines(dev_priv, level)) - blocks = max(blocks, - fixed16_to_u32_round_up(wp->plane_blocks_per_line)); - lines = div_round_up_fixed16(selected_result, - wp->plane_blocks_per_line); - - if (DISPLAY_VER(dev_priv) == 9) { - /* Display WA #1125: skl,bxt,kbl */ - if (level == 0 && wp->rc_surface) - blocks += fixed16_to_u32_round_up(wp->y_tile_minimum); - - /* Display WA #1126: skl,bxt,kbl */ - if (level >= 1 && level <= 7) { - if (wp->y_tiled) { - blocks += fixed16_to_u32_round_up(wp->y_tile_minimum); - lines += wp->y_min_scanlines; - } else { - blocks++; - } - - /* - * Make sure result blocks for higher latency levels are - * at least as high as the level below the current level. - * Assumption in DDB algorithm optimization for special - * cases. Also covers Display WA #1125 for RC.
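(And skl_wm_method2() from above with hypothetical numbers - 10 us latency, 600000 kHz pixel rate, htotal 4000, 30 blocks per line; plain integers here where the kernel uses fixed point:)

#include <stdio.h>

int main(void)
{
	unsigned int latency_us = 10, pixel_rate_khz = 600000;
	unsigned int htotal = 4000, blocks_per_line = 30;
	/* full lines scanned out while one request is outstanding */
	unsigned int lines = (latency_us * pixel_rate_khz + htotal * 1000 - 1) /
			     (htotal * 1000);	/* DIV_ROUND_UP -> 2 */

	printf("method2 = %u blocks\n", lines * blocks_per_line);	/* 60 */
	return 0;
}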
- */ - if (result_prev->blocks > blocks) - blocks = result_prev->blocks; - } - } - - if (DISPLAY_VER(dev_priv) >= 11) { - if (wp->y_tiled) { - int extra_lines; - - if (lines % wp->y_min_scanlines == 0) - extra_lines = wp->y_min_scanlines; - else - extra_lines = wp->y_min_scanlines * 2 - - lines % wp->y_min_scanlines; - - min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines, - wp->plane_blocks_per_line); - } else { - min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10); - } - } - - if (!skl_wm_has_lines(dev_priv, level)) - lines = 0; - - if (lines > skl_wm_max_lines(dev_priv)) { - /* reject it */ - result->min_ddb_alloc = U16_MAX; - return; - } - - /* - * If lines is valid, assume we can use this watermark level - * for now. We'll come back and disable it after we calculate the - * DDB allocation if it turns out we don't actually have enough - * blocks to satisfy it. - */ - result->blocks = blocks; - result->lines = lines; - /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */ - result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1; - result->enable = true; - - if (DISPLAY_VER(dev_priv) < 12 && dev_priv->sagv_block_time_us) - result->can_sagv = latency >= dev_priv->sagv_block_time_us; -} - -static void -skl_compute_wm_levels(const struct intel_crtc_state *crtc_state, - struct intel_plane *plane, - const struct skl_wm_params *wm_params, - struct skl_wm_level *levels) -{ - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - int level, max_level = ilk_wm_max_level(dev_priv); - struct skl_wm_level *result_prev = &levels[0]; - - for (level = 0; level <= max_level; level++) { - struct skl_wm_level *result = &levels[level]; - unsigned int latency = dev_priv->wm.skl_latency[level]; - - skl_compute_plane_wm(crtc_state, plane, level, latency, - wm_params, result_prev, result); - - result_prev = result; - } -} - -static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state, - struct intel_plane *plane, - const struct skl_wm_params *wm_params, - struct skl_plane_wm *plane_wm) -{ - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0; - struct skl_wm_level *levels = plane_wm->wm; - unsigned int latency = 0; - - if (dev_priv->sagv_block_time_us) - latency = dev_priv->sagv_block_time_us + dev_priv->wm.skl_latency[0]; - - skl_compute_plane_wm(crtc_state, plane, 0, latency, - wm_params, &levels[0], - sagv_wm); -} - -static void skl_compute_transition_wm(struct drm_i915_private *dev_priv, - struct skl_wm_level *trans_wm, - const struct skl_wm_level *wm0, - const struct skl_wm_params *wp) -{ - u16 trans_min, trans_amount, trans_y_tile_min; - u16 wm0_blocks, trans_offset, blocks; - - /* Transition WMs don't make any sense if IPC is disabled */ - if (!dev_priv->ipc_enabled) - return; - - /* - * WaDisableTWM:skl,kbl,cfl,bxt - * Transition WMs are not recommended by the HW team for GEN9 - */ - if (DISPLAY_VER(dev_priv) == 9) - return; - - if (DISPLAY_VER(dev_priv) >= 11) - trans_min = 4; - else - trans_min = 14; - - /* Display WA #1140: glk,cnl */ - if (DISPLAY_VER(dev_priv) == 10) - trans_amount = 0; - else - trans_amount = 10; /* This is a configurable amount */ - - trans_offset = trans_min + trans_amount; - - /* - * The spec asks for Selected Result Blocks for wm0 (the real value), - * not Result Blocks (the integer value). Pay attention to the capital - * letters.
The value wm_l0->blocks is actually Result Blocks, but - * since Result Blocks is the ceiling of Selected Result Blocks plus 1, - * and since we later will have to get the ceiling of the sum in the - * transition watermarks calculation, we can just pretend Selected - * Result Blocks is Result Blocks minus 1 and it should work for the - * current platforms. - */ - wm0_blocks = wm0->blocks - 1; - - if (wp->y_tiled) { - trans_y_tile_min = - (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum); - blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset; - } else { - blocks = wm0_blocks + trans_offset; - } - blocks++; - - /* - * Just assume we can enable the transition watermark. After - * computing the DDB we'll come back and disable it if that - * assumption turns out to be false. - */ - trans_wm->blocks = blocks; - trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1); - trans_wm->enable = true; -} - -static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - struct intel_plane *plane, int color_plane) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id]; - struct skl_wm_params wm_params; - int ret; - - ret = skl_compute_plane_wm_params(crtc_state, plane_state, - &wm_params, color_plane); - if (ret) - return ret; - - skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm); - - skl_compute_transition_wm(dev_priv, &wm->trans_wm, - &wm->wm[0], &wm_params); - - if (DISPLAY_VER(dev_priv) >= 12) { - tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm); - - skl_compute_transition_wm(dev_priv, &wm->sagv.trans_wm, - &wm->sagv.wm0, &wm_params); - } - - return 0; -} - -static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - struct intel_plane *plane) -{ - struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id]; - struct skl_wm_params wm_params; - int ret; - - wm->is_planar = true; - - /* uv plane watermarks must also be validated for NV12/Planar */ - ret = skl_compute_plane_wm_params(crtc_state, plane_state, - &wm_params, 1); - if (ret) - return ret; - - skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm); - - return 0; -} - -static int skl_build_plane_wm(struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - enum plane_id plane_id = plane->id; - struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; - const struct drm_framebuffer *fb = plane_state->hw.fb; - int ret; - - memset(wm, 0, sizeof(*wm)); - - if (!intel_wm_plane_visible(crtc_state, plane_state)) - return 0; - - ret = skl_build_plane_wm_single(crtc_state, plane_state, - plane, 0); - if (ret) - return ret; - - if (fb->format->is_yuv && fb->format->num_planes > 1) { - ret = skl_build_plane_wm_uv(crtc_state, plane_state, - plane); - if (ret) - return ret; - } - - return 0; -} - -static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - enum plane_id plane_id = plane->id; - struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; - int ret; - - /* Watermarks calculated in master */ - if 
(plane_state->planar_slave) - return 0; - - memset(wm, 0, sizeof(*wm)); - - if (plane_state->planar_linked_plane) { - const struct drm_framebuffer *fb = plane_state->hw.fb; - - drm_WARN_ON(&dev_priv->drm, - !intel_wm_plane_visible(crtc_state, plane_state)); - drm_WARN_ON(&dev_priv->drm, !fb->format->is_yuv || - fb->format->num_planes == 1); - - ret = skl_build_plane_wm_single(crtc_state, plane_state, - plane_state->planar_linked_plane, 0); - if (ret) - return ret; - - ret = skl_build_plane_wm_single(crtc_state, plane_state, - plane, 1); - if (ret) - return ret; - } else if (intel_wm_plane_visible(crtc_state, plane_state)) { - ret = skl_build_plane_wm_single(crtc_state, plane_state, - plane, 0); - if (ret) - return ret; - } - - return 0; -} - -static int skl_build_pipe_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - const struct intel_plane_state *plane_state; - struct intel_plane *plane; - int ret, i; - - for_each_new_intel_plane_in_state(state, plane, plane_state, i) { - /* - * FIXME should perhaps check {old,new}_plane_crtc->hw.crtc - * instead but we don't populate that correctly for NV12 Y - * planes so for now hack this. - */ - if (plane->pipe != crtc->pipe) - continue; - - if (DISPLAY_VER(dev_priv) >= 11) - ret = icl_build_plane_wm(crtc_state, plane_state); - else - ret = skl_build_plane_wm(crtc_state, plane_state); - if (ret) - return ret; - } - - crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw; - - return 0; -} - -static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, - i915_reg_t reg, - const struct skl_ddb_entry *entry) -{ - if (entry->end) - intel_de_write_fw(dev_priv, reg, - PLANE_BUF_END(entry->end - 1) | - PLANE_BUF_START(entry->start)); - else - intel_de_write_fw(dev_priv, reg, 0); -} - -static void skl_write_wm_level(struct drm_i915_private *dev_priv, - i915_reg_t reg, - const struct skl_wm_level *level) -{ - u32 val = 0; - - if (level->enable) - val |= PLANE_WM_EN; - if (level->ignore_lines) - val |= PLANE_WM_IGNORE_LINES; - val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks); - val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines); - - intel_de_write_fw(dev_priv, reg, val); -} - -void skl_write_plane_wm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - int level, max_level = ilk_wm_max_level(dev_priv); - enum plane_id plane_id = plane->id; - enum pipe pipe = plane->pipe; - const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; - const struct skl_ddb_entry *ddb = - &crtc_state->wm.skl.plane_ddb[plane_id]; - const struct skl_ddb_entry *ddb_y = - &crtc_state->wm.skl.plane_ddb_y[plane_id]; - - for (level = 0; level <= max_level; level++) - skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level), - skl_plane_wm_level(pipe_wm, plane_id, level)); - - skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id), - skl_plane_trans_wm(pipe_wm, plane_id)); - - if (HAS_HW_SAGV_WM(dev_priv)) { - const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; - - skl_write_wm_level(dev_priv, PLANE_WM_SAGV(pipe, plane_id), - &wm->sagv.wm0); - skl_write_wm_level(dev_priv, PLANE_WM_SAGV_TRANS(pipe, plane_id), - &wm->sagv.trans_wm); - } - - skl_ddb_entry_write(dev_priv, - PLANE_BUF_CFG(pipe, plane_id), ddb); - - if (DISPLAY_VER(dev_priv) < 11) - skl_ddb_entry_write(dev_priv, - 
PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_y); -} - -void skl_write_cursor_wm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - int level, max_level = ilk_wm_max_level(dev_priv); - enum plane_id plane_id = plane->id; - enum pipe pipe = plane->pipe; - const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; - const struct skl_ddb_entry *ddb = - &crtc_state->wm.skl.plane_ddb[plane_id]; - - for (level = 0; level <= max_level; level++) - skl_write_wm_level(dev_priv, CUR_WM(pipe, level), - skl_plane_wm_level(pipe_wm, plane_id, level)); - - skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), - skl_plane_trans_wm(pipe_wm, plane_id)); - - if (HAS_HW_SAGV_WM(dev_priv)) { - const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; - - skl_write_wm_level(dev_priv, CUR_WM_SAGV(pipe), - &wm->sagv.wm0); - skl_write_wm_level(dev_priv, CUR_WM_SAGV_TRANS(pipe), - &wm->sagv.trans_wm); - } - - skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb); -} - -static bool skl_wm_level_equals(const struct skl_wm_level *l1, - const struct skl_wm_level *l2) -{ - return l1->enable == l2->enable && - l1->ignore_lines == l2->ignore_lines && - l1->lines == l2->lines && - l1->blocks == l2->blocks; -} - -static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv, - const struct skl_plane_wm *wm1, - const struct skl_plane_wm *wm2) -{ - int level, max_level = ilk_wm_max_level(dev_priv); - - for (level = 0; level <= max_level; level++) { - /* - * We don't check uv_wm as the hardware doesn't actually - * use it. It only gets used for calculating the required - * ddb allocation. - */ - if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level])) - return false; - } - - return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) && - skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) && - skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm); -} - -static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a, - const struct skl_ddb_entry *b) -{ - return a->start < b->end && b->start < a->end; -} - -static void skl_ddb_entry_union(struct skl_ddb_entry *a, - const struct skl_ddb_entry *b) -{ - if (a->end && b->end) { - a->start = min(a->start, b->start); - a->end = max(a->end, b->end); - } else if (b->end) { - a->start = b->start; - a->end = b->end; - } -} - -bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, - const struct skl_ddb_entry *entries, - int num_entries, int ignore_idx) -{ - int i; - - for (i = 0; i < num_entries; i++) { - if (i != ignore_idx && - skl_ddb_entries_overlap(ddb, &entries[i])) - return true; - } - - return false; -} - -static int -skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state, - struct intel_crtc_state *new_crtc_state) -{ - struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state); - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_plane *plane; - - for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { - struct intel_plane_state *plane_state; - enum plane_id plane_id = plane->id; - - if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id], - &new_crtc_state->wm.skl.plane_ddb[plane_id]) && - skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id], - &new_crtc_state->wm.skl.plane_ddb_y[plane_id])) - continue; - - plane_state = intel_atomic_get_plane_state(state, plane); - if (IS_ERR(plane_state)) - return 
PTR_ERR(plane_state); - - new_crtc_state->update_planes |= BIT(plane_id); - } - - return 0; -} - -static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state) -{ - struct drm_i915_private *dev_priv = to_i915(dbuf_state->base.state->base.dev); - u8 enabled_slices; - enum pipe pipe; - - /* - * FIXME: For now we always enable slice S1 as per - * the Bspec display initialization sequence. - */ - enabled_slices = BIT(DBUF_S1); - - for_each_pipe(dev_priv, pipe) - enabled_slices |= dbuf_state->slices[pipe]; - - return enabled_slices; -} - -static int -skl_compute_ddb(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_dbuf_state *old_dbuf_state; - struct intel_dbuf_state *new_dbuf_state = NULL; - const struct intel_crtc_state *old_crtc_state; - struct intel_crtc_state *new_crtc_state; - struct intel_crtc *crtc; - int ret, i; - - for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { - new_dbuf_state = intel_atomic_get_dbuf_state(state); - if (IS_ERR(new_dbuf_state)) - return PTR_ERR(new_dbuf_state); - - old_dbuf_state = intel_atomic_get_old_dbuf_state(state); - break; - } - - if (!new_dbuf_state) - return 0; - - new_dbuf_state->active_pipes = - intel_calc_active_pipes(state, old_dbuf_state->active_pipes); - - if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) { - ret = intel_atomic_lock_global_state(&new_dbuf_state->base); - if (ret) - return ret; - } - - if (HAS_MBUS_JOINING(dev_priv)) - new_dbuf_state->joined_mbus = - adlp_check_mbus_joined(new_dbuf_state->active_pipes); - - for_each_intel_crtc(&dev_priv->drm, crtc) { - enum pipe pipe = crtc->pipe; - - new_dbuf_state->slices[pipe] = - skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes, - new_dbuf_state->joined_mbus); - - if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe]) - continue; - - ret = intel_atomic_lock_global_state(&new_dbuf_state->base); - if (ret) - return ret; - } - - new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state); - - if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices || - old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) { - ret = intel_atomic_serialize_global_state(&new_dbuf_state->base); - if (ret) - return ret; - - if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) { - /* TODO: Implement vblank synchronized MBUS joining changes */ - ret = intel_modeset_all_pipes(state); - if (ret) - return ret; - } - - drm_dbg_kms(&dev_priv->drm, - "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? 
%s->%s\n", - old_dbuf_state->enabled_slices, - new_dbuf_state->enabled_slices, - INTEL_INFO(dev_priv)->display.dbuf.slice_mask, - str_yes_no(old_dbuf_state->joined_mbus), - str_yes_no(new_dbuf_state->joined_mbus)); - } - - for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { - enum pipe pipe = crtc->pipe; - - new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state); - - if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe]) - continue; - - ret = intel_atomic_lock_global_state(&new_dbuf_state->base); - if (ret) - return ret; - } - - for_each_intel_crtc(&dev_priv->drm, crtc) { - ret = skl_crtc_allocate_ddb(state, crtc); - if (ret) - return ret; - } - - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { - ret = skl_crtc_allocate_plane_ddb(state, crtc); - if (ret) - return ret; - - ret = skl_ddb_add_affected_planes(old_crtc_state, - new_crtc_state); - if (ret) - return ret; - } - - return 0; -} - -static char enast(bool enable) -{ - return enable ? '*' : ' '; -} - -static void -skl_print_wm_changes(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_crtc_state *old_crtc_state; - const struct intel_crtc_state *new_crtc_state; - struct intel_plane *plane; - struct intel_crtc *crtc; - int i; - - if (!drm_debug_enabled(DRM_UT_KMS)) - return; - - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { - const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm; - - old_pipe_wm = &old_crtc_state->wm.skl.optimal; - new_pipe_wm = &new_crtc_state->wm.skl.optimal; - - for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { - enum plane_id plane_id = plane->id; - const struct skl_ddb_entry *old, *new; - - old = &old_crtc_state->wm.skl.plane_ddb[plane_id]; - new = &new_crtc_state->wm.skl.plane_ddb[plane_id]; - - if (skl_ddb_entry_equal(old, new)) - continue; - - drm_dbg_kms(&dev_priv->drm, - "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n", - plane->base.base.id, plane->base.name, - old->start, old->end, new->start, new->end, - skl_ddb_entry_size(old), skl_ddb_entry_size(new)); - } - - for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { - enum plane_id plane_id = plane->id; - const struct skl_plane_wm *old_wm, *new_wm; - - old_wm = &old_pipe_wm->planes[plane_id]; - new_wm = &new_pipe_wm->planes[plane_id]; - - if (skl_plane_wm_equals(dev_priv, old_wm, new_wm)) - continue; - - drm_dbg_kms(&dev_priv->drm, - "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" - " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n", - plane->base.base.id, plane->base.name, - enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable), - enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable), - enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable), - enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable), - enast(old_wm->trans_wm.enable), - enast(old_wm->sagv.wm0.enable), - enast(old_wm->sagv.trans_wm.enable), - enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable), - enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable), - enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable), - enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable), - enast(new_wm->trans_wm.enable), - enast(new_wm->sagv.wm0.enable), - enast(new_wm->sagv.trans_wm.enable)); - - drm_dbg_kms(&dev_priv->drm, - "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" 
- " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n", - plane->base.base.id, plane->base.name, - enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines, - enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines, - enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines, - enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines, - enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines, - enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines, - enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines, - enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines, - enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines, - enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, - enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines, - enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines, - enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines, - enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines, - enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines, - enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines, - enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines, - enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines, - enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines, - enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines, - enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines, - enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines); - - drm_dbg_kms(&dev_priv->drm, - "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" - " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", - plane->base.base.id, plane->base.name, - old_wm->wm[0].blocks, old_wm->wm[1].blocks, - old_wm->wm[2].blocks, old_wm->wm[3].blocks, - old_wm->wm[4].blocks, old_wm->wm[5].blocks, - old_wm->wm[6].blocks, old_wm->wm[7].blocks, - old_wm->trans_wm.blocks, - old_wm->sagv.wm0.blocks, - old_wm->sagv.trans_wm.blocks, - new_wm->wm[0].blocks, new_wm->wm[1].blocks, - new_wm->wm[2].blocks, new_wm->wm[3].blocks, - new_wm->wm[4].blocks, new_wm->wm[5].blocks, - new_wm->wm[6].blocks, new_wm->wm[7].blocks, - new_wm->trans_wm.blocks, - new_wm->sagv.wm0.blocks, - new_wm->sagv.trans_wm.blocks); - - drm_dbg_kms(&dev_priv->drm, - "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" - " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", - plane->base.base.id, plane->base.name, - old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc, - old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc, - old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc, - old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc, - old_wm->trans_wm.min_ddb_alloc, - old_wm->sagv.wm0.min_ddb_alloc, - old_wm->sagv.trans_wm.min_ddb_alloc, - new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc, - new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc, - new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc, - new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc, - new_wm->trans_wm.min_ddb_alloc, - new_wm->sagv.wm0.min_ddb_alloc, - new_wm->sagv.trans_wm.min_ddb_alloc); - } - } -} - -static bool skl_plane_selected_wm_equals(struct intel_plane *plane, - const struct skl_pipe_wm *old_pipe_wm, - const struct skl_pipe_wm *new_pipe_wm) -{ - struct drm_i915_private *i915 = to_i915(plane->base.dev); - int level, max_level = ilk_wm_max_level(i915); - - for (level = 0; level <= max_level; level++) { - /* - * We don't check uv_wm as the hardware doesn't actually - * use it. It only gets used for calculating the required - * ddb allocation. 
- */ - if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level), - skl_plane_wm_level(new_pipe_wm, plane->id, level))) - return false; - } - - if (HAS_HW_SAGV_WM(i915)) { - const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id]; - const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id]; - - if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) || - !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm)) - return false; - } - - return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id), - skl_plane_trans_wm(new_pipe_wm, plane->id)); -} - -/* - * To make sure the cursor watermark registers are always consistent - * with our computed state the following scenario needs special - * treatment: - * - * 1. enable cursor - * 2. move cursor entirely offscreen - * 3. disable cursor - * - * Step 2. does call .disable_plane() but does not zero the watermarks - * (since we consider an offscreen cursor still active for the purposes - * of watermarks). Step 3. would not normally call .disable_plane() - * because the actual plane visibility isn't changing, and we don't - * deallocate the cursor ddb until the pipe gets disabled. So we must - * force step 3. to call .disable_plane() to update the watermark - * registers properly. - * - * Other planes do not suffer from this issues as their watermarks are - * calculated based on the actual plane visibility. The only time this - * can trigger for the other planes is during the initial readout as the - * default value of the watermarks registers is not zero. - */ -static int skl_wm_add_affected_planes(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct intel_crtc_state *old_crtc_state = - intel_atomic_get_old_crtc_state(state, crtc); - struct intel_crtc_state *new_crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - struct intel_plane *plane; - - for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { - struct intel_plane_state *plane_state; - enum plane_id plane_id = plane->id; - - /* - * Force a full wm update for every plane on modeset. - * Required because the reset value of the wm registers - * is non-zero, whereas we want all disabled planes to - * have zero watermarks. So if we turn off the relevant - * power well the hardware state will go out of sync - * with the software state. - */ - if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) && - skl_plane_selected_wm_equals(plane, - &old_crtc_state->wm.skl.optimal, - &new_crtc_state->wm.skl.optimal)) - continue; - - plane_state = intel_atomic_get_plane_state(state, plane); - if (IS_ERR(plane_state)) - return PTR_ERR(plane_state); - - new_crtc_state->update_planes |= BIT(plane_id); - } - - return 0; -} - -static int -skl_compute_wm(struct intel_atomic_state *state) -{ - struct intel_crtc *crtc; - struct intel_crtc_state *new_crtc_state; - int ret, i; - - for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { - ret = skl_build_pipe_wm(state, crtc); - if (ret) - return ret; - } - - ret = skl_compute_ddb(state); - if (ret) - return ret; - - ret = intel_compute_sagv_mask(state); - if (ret) - return ret; - - /* - * skl_compute_ddb() will have adjusted the final watermarks - * based on how much ddb is available. Now we can actually - * check if the final watermarks changed. 
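- * (Editor's note, illustrative: skl_build_pipe_wm() first computes the raw per-level values, then skl_compute_ddb() disables any level whose min_ddb_alloc no longer fits the plane's allocation, so skl_wm_add_affected_planes() below only ever compares the surviving levels.)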
- */ - for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { - ret = skl_wm_add_affected_planes(state, crtc); - if (ret) - return ret; - } - - skl_print_wm_changes(state); - - return 0; -} - static void ilk_compute_wm_config(struct drm_i915_private *dev_priv, struct intel_wm_config *config) { @@ -6458,10 +3583,10 @@ static void ilk_initial_watermarks(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - mutex_lock(&dev_priv->wm.wm_mutex); + mutex_lock(&dev_priv->display.wm.wm_mutex); crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate; ilk_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->wm.wm_mutex); + mutex_unlock(&dev_priv->display.wm.wm_mutex); } static void ilk_optimize_watermarks(struct intel_atomic_state *state, @@ -6474,210 +3599,17 @@ static void ilk_optimize_watermarks(struct intel_atomic_state *state, if (!crtc_state->wm.need_postvbl_update) return; - mutex_lock(&dev_priv->wm.wm_mutex); + mutex_lock(&dev_priv->display.wm.wm_mutex); crtc->wm.active.ilk = crtc_state->wm.ilk.optimal; ilk_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->wm.wm_mutex); -} - -static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level) -{ - level->enable = val & PLANE_WM_EN; - level->ignore_lines = val & PLANE_WM_IGNORE_LINES; - level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val); - level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val); -} - -static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, - struct skl_pipe_wm *out) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - int level, max_level; - enum plane_id plane_id; - u32 val; - - max_level = ilk_wm_max_level(dev_priv); - - for_each_plane_id_on_crtc(crtc, plane_id) { - struct skl_plane_wm *wm = &out->planes[plane_id]; - - for (level = 0; level <= max_level; level++) { - if (plane_id != PLANE_CURSOR) - val = intel_uncore_read(&dev_priv->uncore, PLANE_WM(pipe, plane_id, level)); - else - val = intel_uncore_read(&dev_priv->uncore, CUR_WM(pipe, level)); - - skl_wm_level_from_reg_val(val, &wm->wm[level]); - } - - if (plane_id != PLANE_CURSOR) - val = intel_uncore_read(&dev_priv->uncore, PLANE_WM_TRANS(pipe, plane_id)); - else - val = intel_uncore_read(&dev_priv->uncore, CUR_WM_TRANS(pipe)); - - skl_wm_level_from_reg_val(val, &wm->trans_wm); - - if (HAS_HW_SAGV_WM(dev_priv)) { - if (plane_id != PLANE_CURSOR) - val = intel_uncore_read(&dev_priv->uncore, - PLANE_WM_SAGV(pipe, plane_id)); - else - val = intel_uncore_read(&dev_priv->uncore, - CUR_WM_SAGV(pipe)); - - skl_wm_level_from_reg_val(val, &wm->sagv.wm0); - - if (plane_id != PLANE_CURSOR) - val = intel_uncore_read(&dev_priv->uncore, - PLANE_WM_SAGV_TRANS(pipe, plane_id)); - else - val = intel_uncore_read(&dev_priv->uncore, - CUR_WM_SAGV_TRANS(pipe)); - - skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm); - } else if (DISPLAY_VER(dev_priv) >= 12) { - wm->sagv.wm0 = wm->wm[0]; - wm->sagv.trans_wm = wm->trans_wm; - } - } -} - -void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) -{ - struct intel_dbuf_state *dbuf_state = - to_intel_dbuf_state(dev_priv->dbuf.obj.state); - struct intel_crtc *crtc; - - if (HAS_MBUS_JOINING(dev_priv)) - dbuf_state->joined_mbus = intel_de_read(dev_priv, MBUS_CTL) & MBUS_JOIN; - - for_each_intel_crtc(&dev_priv->drm, crtc) { - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - enum pipe pipe = crtc->pipe; - unsigned int mbus_offset; - enum plane_id plane_id; - u8 
slices; - - skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal); - crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal; - - memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe])); - - for_each_plane_id_on_crtc(crtc, plane_id) { - struct skl_ddb_entry *ddb = - &crtc_state->wm.skl.plane_ddb[plane_id]; - struct skl_ddb_entry *ddb_y = - &crtc_state->wm.skl.plane_ddb_y[plane_id]; - - skl_ddb_get_hw_plane_state(dev_priv, crtc->pipe, - plane_id, ddb, ddb_y); - - skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb); - skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y); - } - - dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state); - - /* - * Used for checking overlaps, so we need absolute - * offsets instead of MBUS relative offsets. - */ - slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, - dbuf_state->joined_mbus); - mbus_offset = mbus_ddb_offset(dev_priv, slices); - crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start; - crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end; - - /* The slices actually used by the planes on the pipe */ - dbuf_state->slices[pipe] = - skl_ddb_dbuf_slice_mask(dev_priv, &crtc_state->wm.skl.ddb); - - drm_dbg_kms(&dev_priv->drm, - "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n", - crtc->base.base.id, crtc->base.name, - dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start, - dbuf_state->ddb[pipe].end, dbuf_state->active_pipes, - str_yes_no(dbuf_state->joined_mbus)); - } - - dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices; -} - -static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915) -{ - const struct intel_dbuf_state *dbuf_state = - to_intel_dbuf_state(i915->dbuf.obj.state); - struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; - struct intel_crtc *crtc; - - for_each_intel_crtc(&i915->drm, crtc) { - const struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - - entries[crtc->pipe] = crtc_state->wm.skl.ddb; - } - - for_each_intel_crtc(&i915->drm, crtc) { - const struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - u8 slices; - - slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, - dbuf_state->joined_mbus); - if (dbuf_state->slices[crtc->pipe] & ~slices) - return true; - - if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries, - I915_MAX_PIPES, crtc->pipe)) - return true; - } - - return false; -} - -void skl_wm_sanitize(struct drm_i915_private *i915) -{ - struct intel_crtc *crtc; - - /* - * On TGL/RKL (at least) the BIOS likes to assign the planes - * to the wrong DBUF slices. This will cause an infinite loop - * in skl_commit_modeset_enables() as it can't find a way to - * transition between the old bogus DBUF layout to the new - * proper DBUF layout without DBUF allocation overlaps between - * the planes (which cannot be allowed or else the hardware - * may hang). If we detect a bogus DBUF layout just turn off - * all the planes so that skl_commit_modeset_enables() can - * simply ignore them. 
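- * (Editor's worked example with made-up numbers: if readout were to show pipe A holding ddb 0-511 while pipe B claims 256-767, the 256-511 span overlaps, skl_dbuf_is_misconfigured() returns true, and the loop below shuts off the visible primary planes.)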
- */ - if (!skl_dbuf_is_misconfigured(i915)) - return; - - drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n"); - - for_each_intel_crtc(&i915->drm, crtc) { - struct intel_plane *plane = to_intel_plane(crtc->base.primary); - const struct intel_plane_state *plane_state = - to_intel_plane_state(plane->base.state); - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - - if (plane_state->uapi.visible) - intel_plane_disable_noatomic(crtc, plane); - - drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0); - - memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb)); - } + mutex_unlock(&dev_priv->display.wm.wm_mutex); } static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct ilk_wm_values *hw = &dev_priv->wm.hw; + struct ilk_wm_values *hw = &dev_priv->display.wm.hw; struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal; enum pipe pipe = crtc->pipe; @@ -6825,7 +3757,7 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv, void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv) { - struct g4x_wm_values *wm = &dev_priv->wm.g4x; + struct g4x_wm_values *wm = &dev_priv->display.wm.g4x; struct intel_crtc *crtc; g4x_read_wm_values(dev_priv, wm); @@ -6919,7 +3851,7 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv) struct intel_plane *plane; struct intel_crtc *crtc; - mutex_lock(&dev_priv->wm.wm_mutex); + mutex_lock(&dev_priv->display.wm.wm_mutex); for_each_intel_plane(&dev_priv->drm, plane) { struct intel_crtc *crtc = @@ -6967,12 +3899,12 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv) g4x_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->wm.wm_mutex); + mutex_unlock(&dev_priv->display.wm.wm_mutex); } void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) { - struct vlv_wm_values *wm = &dev_priv->wm.vlv; + struct vlv_wm_values *wm = &dev_priv->display.wm.vlv; struct intel_crtc *crtc; u32 val; @@ -7006,7 +3938,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) drm_dbg_kms(&dev_priv->drm, "Punit not acking DDR DVFS request, " "assuming DDR DVFS is disabled\n"); - dev_priv->wm.max_level = VLV_WM_LEVEL_PM5; + dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM5; } else { val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); if ((val & FORCE_DDR_HIGH_FREQ) == 0) @@ -7075,7 +4007,7 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv) struct intel_plane *plane; struct intel_crtc *crtc; - mutex_lock(&dev_priv->wm.wm_mutex); + mutex_lock(&dev_priv->display.wm.wm_mutex); for_each_intel_plane(&dev_priv->drm, plane) { struct intel_crtc *crtc = @@ -7116,7 +4048,7 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv) vlv_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->wm.wm_mutex); + mutex_unlock(&dev_priv->display.wm.wm_mutex); } /* @@ -7137,7 +4069,7 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv) { - struct ilk_wm_values *hw = &dev_priv->wm.hw; + struct ilk_wm_values *hw = &dev_priv->display.wm.hw; struct intel_crtc *crtc; ilk_init_lp_watermarks(dev_priv); @@ -7166,168 +4098,6 @@ void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv) !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS); } -void intel_wm_state_verify(struct intel_crtc *crtc, - struct intel_crtc_state 
*new_crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct skl_hw_state { - struct skl_ddb_entry ddb[I915_MAX_PLANES]; - struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; - struct skl_pipe_wm wm; - } *hw; - const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal; - int level, max_level = ilk_wm_max_level(dev_priv); - struct intel_plane *plane; - u8 hw_enabled_slices; - - if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active) - return; - - hw = kzalloc(sizeof(*hw), GFP_KERNEL); - if (!hw) - return; - - skl_pipe_wm_get_hw_state(crtc, &hw->wm); - - skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y); - - hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv); - - if (DISPLAY_VER(dev_priv) >= 11 && - hw_enabled_slices != dev_priv->dbuf.enabled_slices) - drm_err(&dev_priv->drm, - "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n", - dev_priv->dbuf.enabled_slices, - hw_enabled_slices); - - for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { - const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; - const struct skl_wm_level *hw_wm_level, *sw_wm_level; - - /* Watermarks */ - for (level = 0; level <= max_level; level++) { - hw_wm_level = &hw->wm.planes[plane->id].wm[level]; - sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level); - - if (skl_wm_level_equals(hw_wm_level, sw_wm_level)) - continue; - - drm_err(&dev_priv->drm, - "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", - plane->base.base.id, plane->base.name, level, - sw_wm_level->enable, - sw_wm_level->blocks, - sw_wm_level->lines, - hw_wm_level->enable, - hw_wm_level->blocks, - hw_wm_level->lines); - } - - hw_wm_level = &hw->wm.planes[plane->id].trans_wm; - sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id); - - if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) { - drm_err(&dev_priv->drm, - "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", - plane->base.base.id, plane->base.name, - sw_wm_level->enable, - sw_wm_level->blocks, - sw_wm_level->lines, - hw_wm_level->enable, - hw_wm_level->blocks, - hw_wm_level->lines); - } - - hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0; - sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0; - - if (HAS_HW_SAGV_WM(dev_priv) && - !skl_wm_level_equals(hw_wm_level, sw_wm_level)) { - drm_err(&dev_priv->drm, - "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", - plane->base.base.id, plane->base.name, - sw_wm_level->enable, - sw_wm_level->blocks, - sw_wm_level->lines, - hw_wm_level->enable, - hw_wm_level->blocks, - hw_wm_level->lines); - } - - hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm; - sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm; - - if (HAS_HW_SAGV_WM(dev_priv) && - !skl_wm_level_equals(hw_wm_level, sw_wm_level)) { - drm_err(&dev_priv->drm, - "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", - plane->base.base.id, plane->base.name, - sw_wm_level->enable, - sw_wm_level->blocks, - sw_wm_level->lines, - hw_wm_level->enable, - hw_wm_level->blocks, - hw_wm_level->lines); - } - - /* DDB */ - hw_ddb_entry = &hw->ddb[PLANE_CURSOR]; - sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR]; - - if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { - drm_err(&dev_priv->drm, - "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n", - plane->base.base.id, plane->base.name, - sw_ddb_entry->start, sw_ddb_entry->end, - hw_ddb_entry->start, hw_ddb_entry->end); - } - } - - 
kfree(hw); -} - -void intel_enable_ipc(struct drm_i915_private *dev_priv) -{ - u32 val; - - if (!HAS_IPC(dev_priv)) - return; - - val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2); - - if (dev_priv->ipc_enabled) - val |= DISP_IPC_ENABLE; - else - val &= ~DISP_IPC_ENABLE; - - intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val); -} - -static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv) -{ - /* Display WA #0477 WaDisableIPC: skl */ - if (IS_SKYLAKE(dev_priv)) - return false; - - /* Display WA #1141: SKL:all KBL:all CFL */ - if (IS_KABYLAKE(dev_priv) || - IS_COFFEELAKE(dev_priv) || - IS_COMETLAKE(dev_priv)) - return dev_priv->dram_info.symmetric_memory; - - return true; -} - -void intel_init_ipc(struct drm_i915_private *dev_priv) -{ - if (!HAS_IPC(dev_priv)) - return; - - dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv); - - intel_enable_ipc(dev_priv); -} - static void ibx_init_clock_gating(struct drm_i915_private *dev_priv) { /* @@ -7435,7 +4205,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv) val = intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN2(pipe)); val |= TRANS_CHICKEN2_TIMING_OVERRIDE; val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED; - if (dev_priv->vbt.fdi_rx_polarity_inverted) + if (dev_priv->display.vbt.fdi_rx_polarity_inverted) val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED; val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER; val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH; @@ -7586,9 +4356,8 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv) static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv) { - /* Wa_1409120013:tgl,rkl,adl-s,dg1,dg2 */ - if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv) || - IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_DG2(dev_priv)) + /* Wa_1409120013 */ + if (DISPLAY_VER(dev_priv) == 12) intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), DPFC_CHICKEN_COMP_DUMMY_PIXEL); @@ -7965,7 +4734,7 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv) OVCUNIT_CLOCK_GATE_DISABLE; if (IS_GM45(dev_priv)) dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; - intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D, dspclk_gate); + intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D(dev_priv), dspclk_gate); g4x_disable_trickle_feed(dev_priv); } @@ -7976,7 +4745,7 @@ static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv) intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); intel_uncore_write(uncore, RENCLK_GATE_D2, 0); - intel_uncore_write(uncore, DSPCLK_GATE_D, 0); + intel_uncore_write(uncore, DSPCLK_GATE_D(dev_priv), 0); intel_uncore_write(uncore, RAMCLK_GATE_D, 0); intel_uncore_write16(uncore, DEUC, 0); intel_uncore_write(uncore, @@ -8168,18 +4937,14 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) } } -static const struct drm_i915_wm_disp_funcs skl_wm_funcs = { - .compute_global_watermarks = skl_compute_wm, -}; - -static const struct drm_i915_wm_disp_funcs ilk_wm_funcs = { +static const struct intel_wm_funcs ilk_wm_funcs = { .compute_pipe_wm = ilk_compute_pipe_wm, .compute_intermediate_wm = ilk_compute_intermediate_wm, .initial_watermarks = ilk_initial_watermarks, .optimize_watermarks = ilk_optimize_watermarks, }; -static const struct drm_i915_wm_disp_funcs vlv_wm_funcs = { +static const struct intel_wm_funcs vlv_wm_funcs = { .compute_pipe_wm = vlv_compute_pipe_wm, .compute_intermediate_wm = vlv_compute_intermediate_wm, .initial_watermarks = vlv_initial_watermarks, @@ -8187,67 
+4952,67 @@ static const struct drm_i915_wm_disp_funcs vlv_wm_funcs = { .atomic_update_watermarks = vlv_atomic_update_fifo, }; -static const struct drm_i915_wm_disp_funcs g4x_wm_funcs = { +static const struct intel_wm_funcs g4x_wm_funcs = { .compute_pipe_wm = g4x_compute_pipe_wm, .compute_intermediate_wm = g4x_compute_intermediate_wm, .initial_watermarks = g4x_initial_watermarks, .optimize_watermarks = g4x_optimize_watermarks, }; -static const struct drm_i915_wm_disp_funcs pnv_wm_funcs = { +static const struct intel_wm_funcs pnv_wm_funcs = { .update_wm = pnv_update_wm, }; -static const struct drm_i915_wm_disp_funcs i965_wm_funcs = { +static const struct intel_wm_funcs i965_wm_funcs = { .update_wm = i965_update_wm, }; -static const struct drm_i915_wm_disp_funcs i9xx_wm_funcs = { +static const struct intel_wm_funcs i9xx_wm_funcs = { .update_wm = i9xx_update_wm, }; -static const struct drm_i915_wm_disp_funcs i845_wm_funcs = { +static const struct intel_wm_funcs i845_wm_funcs = { .update_wm = i845_update_wm, }; -static const struct drm_i915_wm_disp_funcs nop_funcs = { +static const struct intel_wm_funcs nop_funcs = { }; /* Set up chip specific power management-related functions */ void intel_init_pm(struct drm_i915_private *dev_priv) { + if (DISPLAY_VER(dev_priv) >= 9) { + skl_wm_init(dev_priv); + return; + } + /* For cxsr */ if (IS_PINEVIEW(dev_priv)) pnv_get_mem_freq(dev_priv); else if (GRAPHICS_VER(dev_priv) == 5) ilk_get_mem_freq(dev_priv); - intel_sagv_init(dev_priv); - /* For FIFO watermark updates */ - if (DISPLAY_VER(dev_priv) >= 9) { - skl_setup_wm_latency(dev_priv); - dev_priv->wm_disp = &skl_wm_funcs; - } else if (HAS_PCH_SPLIT(dev_priv)) { + if (HAS_PCH_SPLIT(dev_priv)) { ilk_setup_wm_latency(dev_priv); - if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->wm.pri_latency[1] && - dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) || - (DISPLAY_VER(dev_priv) != 5 && dev_priv->wm.pri_latency[0] && - dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) { - dev_priv->wm_disp = &ilk_wm_funcs; + if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->display.wm.pri_latency[1] && + dev_priv->display.wm.spr_latency[1] && dev_priv->display.wm.cur_latency[1]) || + (DISPLAY_VER(dev_priv) != 5 && dev_priv->display.wm.pri_latency[0] && + dev_priv->display.wm.spr_latency[0] && dev_priv->display.wm.cur_latency[0])) { + dev_priv->display.funcs.wm = &ilk_wm_funcs; } else { drm_dbg_kms(&dev_priv->drm, "Failed to read display plane latency. 
" "Disable CxSR\n"); - dev_priv->wm_disp = &nop_funcs; + dev_priv->display.funcs.wm = &nop_funcs; } } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { vlv_setup_wm_latency(dev_priv); - dev_priv->wm_disp = &vlv_wm_funcs; + dev_priv->display.funcs.wm = &vlv_wm_funcs; } else if (IS_G4X(dev_priv)) { g4x_setup_wm_latency(dev_priv); - dev_priv->wm_disp = &g4x_wm_funcs; + dev_priv->display.funcs.wm = &g4x_wm_funcs; } else if (IS_PINEVIEW(dev_priv)) { if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv), dev_priv->is_ddr3, @@ -8261,22 +5026,22 @@ void intel_init_pm(struct drm_i915_private *dev_priv) dev_priv->fsb_freq, dev_priv->mem_freq); /* Disable CxSR and never update its watermark again */ intel_set_memory_cxsr(dev_priv, false); - dev_priv->wm_disp = &nop_funcs; + dev_priv->display.funcs.wm = &nop_funcs; } else - dev_priv->wm_disp = &pnv_wm_funcs; + dev_priv->display.funcs.wm = &pnv_wm_funcs; } else if (DISPLAY_VER(dev_priv) == 4) { - dev_priv->wm_disp = &i965_wm_funcs; + dev_priv->display.funcs.wm = &i965_wm_funcs; } else if (DISPLAY_VER(dev_priv) == 3) { - dev_priv->wm_disp = &i9xx_wm_funcs; + dev_priv->display.funcs.wm = &i9xx_wm_funcs; } else if (DISPLAY_VER(dev_priv) == 2) { if (INTEL_NUM_PIPES(dev_priv) == 1) - dev_priv->wm_disp = &i845_wm_funcs; + dev_priv->display.funcs.wm = &i845_wm_funcs; else - dev_priv->wm_disp = &i9xx_wm_funcs; + dev_priv->display.funcs.wm = &i9xx_wm_funcs; } else { drm_err(&dev_priv->drm, "unexpected fall-through in %s\n", __func__); - dev_priv->wm_disp = &nop_funcs; + dev_priv->display.funcs.wm = &nop_funcs; } } @@ -8285,183 +5050,3 @@ void intel_pm_setup(struct drm_i915_private *dev_priv) dev_priv->runtime_pm.suspended = false; atomic_set(&dev_priv->runtime_pm.wakeref_count, 0); } - -static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj) -{ - struct intel_dbuf_state *dbuf_state; - - dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL); - if (!dbuf_state) - return NULL; - - return &dbuf_state->base; -} - -static void intel_dbuf_destroy_state(struct intel_global_obj *obj, - struct intel_global_state *state) -{ - kfree(state); -} - -static const struct intel_global_state_funcs intel_dbuf_funcs = { - .atomic_duplicate_state = intel_dbuf_duplicate_state, - .atomic_destroy_state = intel_dbuf_destroy_state, -}; - -struct intel_dbuf_state * -intel_atomic_get_dbuf_state(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_global_state *dbuf_state; - - dbuf_state = intel_atomic_get_global_obj_state(state, &dev_priv->dbuf.obj); - if (IS_ERR(dbuf_state)) - return ERR_CAST(dbuf_state); - - return to_intel_dbuf_state(dbuf_state); -} - -int intel_dbuf_init(struct drm_i915_private *dev_priv) -{ - struct intel_dbuf_state *dbuf_state; - - dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL); - if (!dbuf_state) - return -ENOMEM; - - intel_atomic_global_obj_init(dev_priv, &dev_priv->dbuf.obj, - &dbuf_state->base, &intel_dbuf_funcs); - - return 0; -} - -/* - * Configure MBUS_CTL and all DBUF_CTL_S of each slice to join_mbus state before - * update the request state of all DBUS slices. 
- */ -static void update_mbus_pre_enable(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - u32 mbus_ctl, dbuf_min_tracker_val; - enum dbuf_slice slice; - const struct intel_dbuf_state *dbuf_state = - intel_atomic_get_new_dbuf_state(state); - - if (!HAS_MBUS_JOINING(dev_priv)) - return; - - /* - * TODO: Implement vblank synchronized MBUS joining changes. - * Must be properly coordinated with dbuf reprogramming. - */ - if (dbuf_state->joined_mbus) { - mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN | - MBUS_JOIN_PIPE_SELECT_NONE; - dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3); - } else { - mbus_ctl = MBUS_HASHING_MODE_2x2 | - MBUS_JOIN_PIPE_SELECT_NONE; - dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1); - } - - intel_de_rmw(dev_priv, MBUS_CTL, - MBUS_HASHING_MODE_MASK | MBUS_JOIN | - MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl); - - for_each_dbuf_slice(dev_priv, slice) - intel_de_rmw(dev_priv, DBUF_CTL_S(slice), - DBUF_MIN_TRACKER_STATE_SERVICE_MASK, - dbuf_min_tracker_val); -} - -void intel_dbuf_pre_plane_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_dbuf_state *new_dbuf_state = - intel_atomic_get_new_dbuf_state(state); - const struct intel_dbuf_state *old_dbuf_state = - intel_atomic_get_old_dbuf_state(state); - - if (!new_dbuf_state || - ((new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices) - && (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))) - return; - - WARN_ON(!new_dbuf_state->base.changed); - - update_mbus_pre_enable(state); - gen9_dbuf_slices_update(dev_priv, - old_dbuf_state->enabled_slices | - new_dbuf_state->enabled_slices); -} - -void intel_dbuf_post_plane_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_dbuf_state *new_dbuf_state = - intel_atomic_get_new_dbuf_state(state); - const struct intel_dbuf_state *old_dbuf_state = - intel_atomic_get_old_dbuf_state(state); - - if (!new_dbuf_state || - ((new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices) - && (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))) - return; - - WARN_ON(!new_dbuf_state->base.changed); - - gen9_dbuf_slices_update(dev_priv, - new_dbuf_state->enabled_slices); -} - -void intel_mbus_dbox_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *i915 = to_i915(state->base.dev); - const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state; - const struct intel_crtc_state *new_crtc_state; - const struct intel_crtc *crtc; - u32 val = 0; - int i; - - if (DISPLAY_VER(i915) < 11) - return; - - new_dbuf_state = intel_atomic_get_new_dbuf_state(state); - old_dbuf_state = intel_atomic_get_old_dbuf_state(state); - if (!new_dbuf_state || - (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus && - new_dbuf_state->active_pipes == old_dbuf_state->active_pipes)) - return; - - if (DISPLAY_VER(i915) >= 12) { - val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16); - val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1); - val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN; - } - - /* Wa_22010947358:adl-p */ - if (IS_ALDERLAKE_P(i915)) - val |= new_dbuf_state->joined_mbus ? 
MBUS_DBOX_A_CREDIT(6) : - MBUS_DBOX_A_CREDIT(4); - else - val |= MBUS_DBOX_A_CREDIT(2); - - if (IS_ALDERLAKE_P(i915)) { - val |= MBUS_DBOX_BW_CREDIT(2); - val |= MBUS_DBOX_B_CREDIT(8); - } else if (DISPLAY_VER(i915) >= 12) { - val |= MBUS_DBOX_BW_CREDIT(2); - val |= MBUS_DBOX_B_CREDIT(12); - } else { - val |= MBUS_DBOX_BW_CREDIT(1); - val |= MBUS_DBOX_B_CREDIT(8); - } - - for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { - if (!new_crtc_state->hw.active || - !intel_crtc_needs_modeset(new_crtc_state)) - continue; - - intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), val); - } -} diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h index 945503ae493e..c09b872d65c8 100644 --- a/drivers/gpu/drm/i915/intel_pm.h +++ b/drivers/gpu/drm/i915/intel_pm.h @@ -8,22 +8,9 @@ #include <linux/types.h> -#include "display/intel_display.h" -#include "display/intel_global_state.h" - -#include "i915_drv.h" - -struct drm_device; struct drm_i915_private; -struct i915_request; -struct intel_atomic_state; -struct intel_bw_state; -struct intel_crtc; struct intel_crtc_state; -struct intel_plane; -struct skl_ddb_entry; -struct skl_pipe_wm; -struct skl_wm_level; +struct intel_plane_state; void intel_init_clock_gating(struct drm_i915_private *dev_priv); void intel_suspend_hw(struct drm_i915_private *dev_priv); @@ -34,56 +21,14 @@ void intel_pm_setup(struct drm_i915_private *dev_priv); void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv); void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv); void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv); -void skl_wm_get_hw_state(struct drm_i915_private *dev_priv); -void intel_wm_state_verify(struct intel_crtc *crtc, - struct intel_crtc_state *new_crtc_state); -u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv); -void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv); -u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv, - const struct skl_ddb_entry *entry); void g4x_wm_sanitize(struct drm_i915_private *dev_priv); void vlv_wm_sanitize(struct drm_i915_private *dev_priv); -void skl_wm_sanitize(struct drm_i915_private *dev_priv); -bool intel_can_enable_sagv(struct drm_i915_private *dev_priv, - const struct intel_bw_state *bw_state); -void intel_sagv_pre_plane_update(struct intel_atomic_state *state); -void intel_sagv_post_plane_update(struct intel_atomic_state *state); -bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, - const struct skl_ddb_entry *entries, - int num_entries, int ignore_idx); -void skl_write_plane_wm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state); -void skl_write_cursor_wm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state); bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv); -void intel_init_ipc(struct drm_i915_private *dev_priv); -void intel_enable_ipc(struct drm_i915_private *dev_priv); +bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +void intel_print_wm_latency(struct drm_i915_private *dev_priv, + const char *name, const u16 wm[]); bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); -struct intel_dbuf_state { - struct intel_global_state base; - - struct skl_ddb_entry ddb[I915_MAX_PIPES]; - unsigned int weight[I915_MAX_PIPES]; - u8 slices[I915_MAX_PIPES]; - u8 enabled_slices; - u8 active_pipes; - bool joined_mbus; -}; - -struct intel_dbuf_state * -intel_atomic_get_dbuf_state(struct intel_atomic_state 
*state); - -#define to_intel_dbuf_state(x) container_of((x), struct intel_dbuf_state, base) -#define intel_atomic_get_old_dbuf_state(state) \ - to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->dbuf.obj)) -#define intel_atomic_get_new_dbuf_state(state) \ - to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->dbuf.obj)) - -int intel_dbuf_init(struct drm_i915_private *dev_priv); -void intel_dbuf_pre_plane_update(struct intel_atomic_state *state); -void intel_dbuf_post_plane_update(struct intel_atomic_state *state); -void intel_mbus_dbox_update(struct intel_atomic_state *state); - #endif /* __INTEL_PM_H__ */ diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index c7ef5f2ff2e1..5cd423c7b646 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -21,6 +21,7 @@ * IN THE SOFTWARE. */ +#include <drm/drm_managed.h> #include <linux/pm_runtime.h> #include "gt/intel_engine_regs.h" @@ -44,29 +45,47 @@ fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains) } void -intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug) +intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915) { - spin_lock_init(&mmio_debug->lock); - mmio_debug->unclaimed_mmio_check = 1; + spin_lock_init(&i915->mmio_debug.lock); + i915->mmio_debug.unclaimed_mmio_check = 1; + + i915->uncore.debug = &i915->mmio_debug; } -static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug) +static void mmio_debug_suspend(struct intel_uncore *uncore) { - lockdep_assert_held(&mmio_debug->lock); + if (!uncore->debug) + return; + + spin_lock(&uncore->debug->lock); /* Save and disable mmio debugging for the user bypass */ - if (!mmio_debug->suspend_count++) { - mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check; - mmio_debug->unclaimed_mmio_check = 0; + if (!uncore->debug->suspend_count++) { + uncore->debug->saved_mmio_check = uncore->debug->unclaimed_mmio_check; + uncore->debug->unclaimed_mmio_check = 0; } + + spin_unlock(&uncore->debug->lock); } -static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug) +static bool check_for_unclaimed_mmio(struct intel_uncore *uncore); + +static void mmio_debug_resume(struct intel_uncore *uncore) { - lockdep_assert_held(&mmio_debug->lock); + if (!uncore->debug) + return; + + spin_lock(&uncore->debug->lock); + + if (!--uncore->debug->suspend_count) + uncore->debug->unclaimed_mmio_check = uncore->debug->saved_mmio_check; - if (!--mmio_debug->suspend_count) - mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check; + if (check_for_unclaimed_mmio(uncore)) + drm_info(&uncore->i915->drm, + "Invalid mmio detected during user access\n"); + + spin_unlock(&uncore->debug->lock); } static const char * const forcewake_domain_names[] = { @@ -677,9 +696,7 @@ void intel_uncore_forcewake_user_get(struct intel_uncore *uncore) spin_lock_irq(&uncore->lock); if (!uncore->user_forcewake_count++) { intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL); - spin_lock(&uncore->debug->lock); - mmio_debug_suspend(uncore->debug); - spin_unlock(&uncore->debug->lock); + mmio_debug_suspend(uncore); } spin_unlock_irq(&uncore->lock); } @@ -695,14 +712,7 @@ void intel_uncore_forcewake_user_put(struct intel_uncore *uncore) { spin_lock_irq(&uncore->lock); if (!--uncore->user_forcewake_count) { - spin_lock(&uncore->debug->lock); - mmio_debug_resume(uncore->debug); - - if 
(check_for_unclaimed_mmio(uncore)) - drm_info(&uncore->i915->drm, - "Invalid mmio detected during user access\n"); - spin_unlock(&uncore->debug->lock); - + mmio_debug_resume(uncore); intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL); } spin_unlock_irq(&uncore->lock); @@ -918,6 +928,9 @@ find_fw_domain(struct intel_uncore *uncore, u32 offset) { const struct intel_forcewake_range *entry; + if (IS_GSI_REG(offset)) + offset += uncore->gsi_offset; + entry = BSEARCH(offset, uncore->fw_domains_table, uncore->fw_domains_table_entries, @@ -1133,6 +1146,9 @@ static bool is_shadowed(struct intel_uncore *uncore, u32 offset) if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table)) return false; + if (IS_GSI_REG(offset)) + offset += uncore->gsi_offset; + return BSEARCH(offset, uncore->shadowed_reg_table, uncore->shadowed_reg_table_entries, @@ -1704,7 +1720,7 @@ unclaimed_reg_debug(struct intel_uncore *uncore, const bool read, const bool before) { - if (likely(!uncore->i915->params.mmio_debug)) + if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug) return; /* interrupts are disabled and re-enabled around uncore->lock usage */ @@ -1985,8 +2001,8 @@ static int __fw_domain_init(struct intel_uncore *uncore, d->uncore = uncore; d->wake_count = 0; - d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set); - d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack); + d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set) + uncore->gsi_offset; + d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack) + uncore->gsi_offset; d->id = domain_id; @@ -2070,7 +2086,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) if (GRAPHICS_VER(i915) >= 11) { /* we'll prune the domains of missing engines later */ - intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask; + intel_engine_mask_t emask = RUNTIME_INFO(i915)->platform_engine_mask; int i; uncore->fw_get_funcs = &uncore_get_fallback; @@ -2223,6 +2239,11 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb, return NOTIFY_OK; } +static void uncore_unmap_mmio(struct drm_device *drm, void *regs) +{ + iounmap(regs); +} + int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr) { struct drm_i915_private *i915 = uncore->i915; @@ -2251,12 +2272,7 @@ int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr) return -EIO; } - return 0; -} - -void intel_uncore_cleanup_mmio(struct intel_uncore *uncore) -{ - iounmap(uncore->regs); + return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio, uncore->regs); } void intel_uncore_init_early(struct intel_uncore *uncore, @@ -2266,7 +2282,6 @@ void intel_uncore_init_early(struct intel_uncore *uncore, uncore->i915 = gt->i915; uncore->gt = gt; uncore->rpm = &gt->i915->runtime_pm; - uncore->debug = &gt->i915->mmio_debug; } static void uncore_raw_init(struct intel_uncore *uncore) @@ -2446,8 +2461,11 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore, } } -void intel_uncore_fini_mmio(struct intel_uncore *uncore) +/* Called via drm-managed action */ +void intel_uncore_fini_mmio(struct drm_device *dev, void *data) { + struct intel_uncore *uncore = data; + if (intel_uncore_has_forcewake(uncore)) { iosf_mbi_punit_acquire(); iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( @@ -2577,6 +2595,9 @@ bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore) { bool ret; + if (!uncore->debug) + return false; + spin_lock_irq(&uncore->debug->lock); ret = check_for_unclaimed_mmio(uncore); 
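/* editor's note: uncore->debug may be absent on some uncores after this patch; the NULL check added just above guarantees the debug->lock dereference here is safe */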
spin_unlock_irq(&uncore->debug->lock); @@ -2589,6 +2610,9 @@ intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore) { bool ret = false; + if (drm_WARN_ON(&uncore->i915->drm, !uncore->debug)) + return false; + spin_lock_irq(&uncore->debug->lock); if (unlikely(uncore->debug->unclaimed_mmio_check <= 0)) diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index b1fa912a65e7..5022bac80b67 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -33,6 +33,7 @@ #include "i915_reg_defs.h" +struct drm_device; struct drm_i915_private; struct intel_runtime_pm; struct intel_uncore; @@ -135,6 +136,16 @@ struct intel_uncore { spinlock_t lock; /** lock is also taken in irq contexts. */ + /* + * Do we need to apply an additional offset to reach the beginning + * of the basic non-engine GT registers (referred to as "GSI" on + * newer platforms, or "GT block" on older platforms)? If so, we'll + * track that here and apply it transparently to registers in the + * appropriate range to maintain compatibility with our existing + * register definitions and GT code. + */ + u32 gsi_offset; + unsigned int flags; #define UNCORE_HAS_FORCEWAKE BIT(0) #define UNCORE_HAS_FPGA_DBG_UNCLAIMED BIT(1) @@ -210,8 +221,7 @@ intel_uncore_has_fifo(const struct intel_uncore *uncore) return uncore->flags & UNCORE_HAS_FIFO; } -void -intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug); +void intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915); void intel_uncore_init_early(struct intel_uncore *uncore, struct intel_gt *gt); int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr); @@ -221,7 +231,7 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore, bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore); bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore); void intel_uncore_cleanup_mmio(struct intel_uncore *uncore); -void intel_uncore_fini_mmio(struct intel_uncore *uncore); +void intel_uncore_fini_mmio(struct drm_device *dev, void *data); void intel_uncore_suspend(struct intel_uncore *uncore); void intel_uncore_resume_early(struct intel_uncore *uncore); void intel_uncore_runtime_resume(struct intel_uncore *uncore); @@ -294,19 +304,27 @@ intel_wait_for_register_fw(struct intel_uncore *uncore, 2, timeout_ms, NULL); } +#define IS_GSI_REG(reg) ((reg) < 0x40000) + /* register access functions */ #define __raw_read(x__, s__) \ static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \ i915_reg_t reg) \ { \ - return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \ + u32 offset = i915_mmio_reg_offset(reg); \ + if (IS_GSI_REG(offset)) \ + offset += uncore->gsi_offset; \ + return read##s__(uncore->regs + offset); \ } #define __raw_write(x__, s__) \ static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \ i915_reg_t reg, u##x__ val) \ { \ - write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \ + u32 offset = i915_mmio_reg_offset(reg); \ + if (IS_GSI_REG(offset)) \ + offset += uncore->gsi_offset; \ + write##s__(val, uncore->regs + offset); \ } __raw_read(8, b) __raw_read(16, w) @@ -447,6 +465,18 @@ static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore, return (reg_val & mask) != expected_val ? 
-EINVAL : 0; } +/* + * The raw_reg_{read,write} macros are intended as a micro-optimization for + * interrupt handlers so that the pointer indirection on uncore->regs can + * be computed once (and presumably cached in a register) instead of generating + * extra load instructions for each MMIO access. + * + * Given that these macros are only intended for non-GSI interrupt registers + * (and the goal is to avoid extra instructions generated by the compiler), + * these macros do not account for uncore->gsi_offset. Any caller that needs + * to use these macros on a GSI register is responsible for adding the + * appropriate GSI offset to the 'base' parameter. + */ #define raw_reg_read(base, reg) \ readl(base + i915_mmio_reg_offset(reg)) #define raw_reg_write(base, reg, value) \ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c index 17109c513259..69cdaaddc4a9 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c @@ -169,11 +169,11 @@ static void pxp_queue_termination(struct intel_pxp *pxp) * We want to get the same effect as if we received a termination * interrupt, so just pretend that we did. */ - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); intel_pxp_mark_termination_in_progress(pxp); pxp->session_events |= PXP_TERMINATION_REQUEST; queue_work(system_unbound_wq, &pxp->session_work); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); } static bool pxp_component_bound(struct intel_pxp *pxp) diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c index e888b5124a07..4359e8be4101 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c @@ -47,9 +47,9 @@ static int pxp_terminate_set(void *data, u64 val) return -ENODEV; /* simulate a termination interrupt */ - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); intel_pxp_irq_handler(pxp, GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(100))) diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c index 04745f914407..c28be430718a 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c @@ -25,7 +25,7 @@ void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir) if (GEM_WARN_ON(!intel_pxp_is_enabled(pxp))) return; - lockdep_assert_held(&gt->irq_lock); + lockdep_assert_held(gt->irq_lock); if (unlikely(!iir)) return; @@ -55,16 +55,16 @@ static inline void __pxp_set_interrupts(struct intel_gt *gt, u32 interrupts) static inline void pxp_irq_reset(struct intel_gt *gt) { - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); gen11_gt_reset_one_iir(gt, 0, GEN11_KCR); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); } void intel_pxp_irq_enable(struct intel_pxp *pxp) { struct intel_gt *gt = pxp_to_gt(pxp); - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); if (!pxp->irq_enabled) WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_KCR)); @@ -72,7 +72,7 @@ void intel_pxp_irq_enable(struct intel_pxp *pxp) __pxp_set_interrupts(gt, GEN12_PXP_INTERRUPTS); pxp->irq_enabled = true; - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); } void intel_pxp_irq_disable(struct intel_pxp *pxp) @@ -88,12 +88,12 @@ void intel_pxp_irq_disable(struct intel_pxp *pxp) */ GEM_WARN_ON(intel_pxp_is_active(pxp)); - 
spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); pxp->irq_enabled = false; __pxp_set_interrupts(gt, 0); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); intel_synchronize_irq(gt->i915); pxp_irq_reset(gt); diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c index 92b00b4de240..1bb5b5249157 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c @@ -144,9 +144,9 @@ void intel_pxp_session_work(struct work_struct *work) intel_wakeref_t wakeref; u32 events = 0; - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); events = fetch_and_zero(&pxp->session_events); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); if (!events) return; diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 9c31a16f8380..fff11c90f1fa 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -115,6 +115,7 @@ static struct dev_pm_domain pm_domain = { static void mock_gt_probe(struct drm_i915_private *i915) { i915->gt[0] = &i915->gt0; + i915->gt[0]->name = "Mock GT"; } struct drm_i915_private *mock_gem_device(void) @@ -172,14 +173,14 @@ struct drm_i915_private *mock_gem_device(void) /* Using the global GTT may ask questions about KMS users, so prepare */ drm_mode_config_init(&i915->drm); - mkwrite_device_info(i915)->graphics.ver = -1; + RUNTIME_INFO(i915)->graphics.ip.ver = -1; - mkwrite_device_info(i915)->page_sizes = + RUNTIME_INFO(i915)->page_sizes = I915_GTT_PAGE_SIZE_4K | I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_2M; - mkwrite_device_info(i915)->memory_regions = REGION_SMEM; + RUNTIME_INFO(i915)->memory_regions = REGION_SMEM; intel_memory_regions_hw_probe(i915); spin_lock_init(&i915->gpu_error.lock); @@ -209,7 +210,7 @@ struct drm_i915_private *mock_gem_device(void) mock_init_ggtt(to_gt(i915)); to_gt(i915)->vm = i915_vm_get(&to_gt(i915)->ggtt->vm); - mkwrite_device_info(i915)->platform_engine_mask = BIT(0); + RUNTIME_INFO(i915)->platform_engine_mask = BIT(0); to_gt(i915)->info.engine_mask = BIT(0); to_gt(i915)->engine[RCS0] = mock_engine(i915, "mock", RCS0); diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index 59506ba6fc48..79305e4acce2 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2013-2020, Intel Corporation. All rights reserved. + * Copyright (c) 2013-2022, Intel Corporation. All rights reserved. 
* Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -15,6 +15,7 @@ #include "mei_dev.h" #include "client.h" +#include "mkhi.h" #define MEI_UUID_NFC_INFO UUID_LE(0xd2de1625, 0x382d, 0x417d, \ 0x48, 0xa4, 0xef, 0xab, 0xba, 0x8a, 0x12, 0x06) @@ -89,20 +90,6 @@ struct mei_os_ver { u8 reserved2; } __packed; -#define MKHI_FEATURE_PTT 0x10 - -struct mkhi_rule_id { - __le16 rule_type; - u8 feature_id; - u8 reserved; -} __packed; - -struct mkhi_fwcaps { - struct mkhi_rule_id id; - u8 len; - u8 data[]; -} __packed; - struct mkhi_fw_ver_block { u16 minor; u8 major; @@ -115,22 +102,6 @@ struct mkhi_fw_ver { struct mkhi_fw_ver_block ver[MEI_MAX_FW_VER_BLOCKS]; } __packed; -#define MKHI_FWCAPS_GROUP_ID 0x3 -#define MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD 6 -#define MKHI_GEN_GROUP_ID 0xFF -#define MKHI_GEN_GET_FW_VERSION_CMD 0x2 -struct mkhi_msg_hdr { - u8 group_id; - u8 command; - u8 reserved; - u8 result; -} __packed; - -struct mkhi_msg { - struct mkhi_msg_hdr hdr; - u8 data[]; -} __packed; - #define MKHI_OSVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \ sizeof(struct mkhi_fwcaps) + \ sizeof(struct mei_os_ver)) @@ -164,7 +135,6 @@ static int mei_osver(struct mei_cl_device *cldev) sizeof(struct mkhi_fw_ver)) #define MKHI_FWVER_LEN(__num) (sizeof(struct mkhi_msg_hdr) + \ sizeof(struct mkhi_fw_ver_block) * (__num)) -#define MKHI_RCV_TIMEOUT 500 /* receive timeout in msec */ static int mei_fwver(struct mei_cl_device *cldev) { char buf[MKHI_FWVER_BUF_LEN]; @@ -187,7 +157,7 @@ static int mei_fwver(struct mei_cl_device *cldev) ret = 0; bytes_recv = __mei_cl_recv(cldev->cl, buf, sizeof(buf), NULL, 0, - MKHI_RCV_TIMEOUT); + cldev->bus->timeouts.mkhi_recv); if (bytes_recv < 0 || (size_t)bytes_recv < MKHI_FWVER_LEN(1)) { /* * Should be at least one version block, @@ -218,6 +188,19 @@ static int mei_fwver(struct mei_cl_device *cldev) return ret; } +static int mei_gfx_memory_ready(struct mei_cl_device *cldev) +{ + struct mkhi_gfx_mem_ready req = {0}; + unsigned int mode = MEI_CL_IO_TX_INTERNAL; + + req.hdr.group_id = MKHI_GROUP_ID_GFX; + req.hdr.command = MKHI_GFX_MEMORY_READY_CMD_REQ; + req.flags = MKHI_GFX_MEM_READY_PXP_ALLOWED; + + dev_dbg(&cldev->dev, "Sending memory ready command\n"); + return __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), 0, mode); +} + static void mei_mkhi_fix(struct mei_cl_device *cldev) { int ret; @@ -264,6 +247,39 @@ static void mei_gsc_mkhi_ver(struct mei_cl_device *cldev) dev_err(&cldev->dev, "FW version command failed %d\n", ret); mei_cldev_disable(cldev); } + +static void mei_gsc_mkhi_fix_ver(struct mei_cl_device *cldev) +{ + int ret; + + /* No need to enable the client if nothing is needed from it */ + if (!cldev->bus->fw_f_fw_ver_supported && + cldev->bus->pxp_mode != MEI_DEV_PXP_INIT) + return; + + ret = mei_cldev_enable(cldev); + if (ret) + return; + + if (cldev->bus->pxp_mode == MEI_DEV_PXP_INIT) { + ret = mei_gfx_memory_ready(cldev); + if (ret < 0) + dev_err(&cldev->dev, "memory ready command failed %d\n", ret); + else + dev_dbg(&cldev->dev, "memory ready command sent\n"); + /* we go to reset after that */ + cldev->bus->pxp_mode = MEI_DEV_PXP_SETUP; + goto out; + } + + ret = mei_fwver(cldev); + if (ret < 0) + dev_err(&cldev->dev, "FW version command failed %d\n", + ret); +out: + mei_cldev_disable(cldev); +} + /** * mei_wd - wd client on the bus, change protocol version * as the API has changed. 
@@ -503,6 +519,26 @@ static void vt_support(struct mei_cl_device *cldev) cldev->do_match = 1; } +/** + * pxp_is_ready - enable bus client if pxp is ready + * + * @cldev: me clients device + */ +static void pxp_is_ready(struct mei_cl_device *cldev) +{ + struct mei_device *bus = cldev->bus; + + switch (bus->pxp_mode) { + case MEI_DEV_PXP_READY: + case MEI_DEV_PXP_DEFAULT: + cldev->do_match = 1; + break; + default: + cldev->do_match = 0; + break; + } +} + #define MEI_FIXUP(_uuid, _hook) { _uuid, _hook } static struct mei_fixup { @@ -516,10 +552,10 @@ static struct mei_fixup { MEI_FIXUP(MEI_UUID_WD, mei_wd), MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix), MEI_FIXUP(MEI_UUID_IGSC_MKHI, mei_gsc_mkhi_ver), - MEI_FIXUP(MEI_UUID_IGSC_MKHI_FIX, mei_gsc_mkhi_ver), + MEI_FIXUP(MEI_UUID_IGSC_MKHI_FIX, mei_gsc_mkhi_fix_ver), MEI_FIXUP(MEI_UUID_HDCP, whitelist), MEI_FIXUP(MEI_UUID_ANY, vt_support), - MEI_FIXUP(MEI_UUID_PAVP, whitelist), + MEI_FIXUP(MEI_UUID_PAVP, pxp_is_ready), }; /** diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 31264ab2eb13..0b2fbe1335a7 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2003-2020, Intel Corporation. All rights reserved. + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. * Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -870,7 +870,7 @@ static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb) } list_move_tail(&cb->list, &dev->ctrl_rd_list); - cl->timer_count = MEI_CONNECT_TIMEOUT; + cl->timer_count = dev->timeouts.connect; mei_schedule_stall_timer(dev); return 0; @@ -945,7 +945,7 @@ static int __mei_cl_disconnect(struct mei_cl *cl) wait_event_timeout(cl->wait, cl->state == MEI_FILE_DISCONNECT_REPLY || cl->state == MEI_FILE_DISCONNECTED, - mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + dev->timeouts.cl_connect); mutex_lock(&dev->device_lock); rets = cl->status; @@ -1065,7 +1065,7 @@ static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb) } list_move_tail(&cb->list, &dev->ctrl_rd_list); - cl->timer_count = MEI_CONNECT_TIMEOUT; + cl->timer_count = dev->timeouts.connect; mei_schedule_stall_timer(dev); return 0; } @@ -1164,7 +1164,7 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, cl->state == MEI_FILE_DISCONNECTED || cl->state == MEI_FILE_DISCONNECT_REQUIRED || cl->state == MEI_FILE_DISCONNECT_REPLY), - mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + dev->timeouts.cl_connect); mutex_lock(&dev->device_lock); if (!mei_cl_is_connected(cl)) { @@ -1562,7 +1562,7 @@ int mei_cl_notify_request(struct mei_cl *cl, cl->notify_en == request || cl->status || !mei_cl_is_connected(cl), - mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + dev->timeouts.cl_connect); mutex_lock(&dev->device_lock); if (cl->notify_en != request && !cl->status) @@ -2336,7 +2336,7 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp, mutex_unlock(&dev->device_lock); wait_event_timeout(cl->wait, cl->dma_mapped || cl->status, - mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + dev->timeouts.cl_connect); mutex_lock(&dev->device_lock); if (!cl->dma_mapped && !cl->status) @@ -2415,7 +2415,7 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp) mutex_unlock(&dev->device_lock); wait_event_timeout(cl->wait, !cl->dma_mapped || cl->status, - mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + dev->timeouts.cl_connect); mutex_lock(&dev->device_lock); if (cl->dma_mapped && !cl->status) diff --git 
a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c index 1ce61e9e24fc..3b098d4c8e3d 100644 --- a/drivers/misc/mei/debugfs.c +++ b/drivers/misc/mei/debugfs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2012-2016, Intel Corporation. All rights reserved + * Copyright (c) 2012-2022, Intel Corporation. All rights reserved * Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -86,6 +86,20 @@ out: } DEFINE_SHOW_ATTRIBUTE(mei_dbgfs_active); +static const char *mei_dev_pxp_mode_str(enum mei_dev_pxp_mode state) +{ +#define MEI_PXP_MODE(state) case MEI_DEV_PXP_##state: return #state + switch (state) { + MEI_PXP_MODE(DEFAULT); + MEI_PXP_MODE(INIT); + MEI_PXP_MODE(SETUP); + MEI_PXP_MODE(READY); + default: + return "unknown"; + } +#undef MEI_PXP_MODE +} + static int mei_dbgfs_devstate_show(struct seq_file *m, void *unused) { struct mei_device *dev = m->private; @@ -112,6 +126,9 @@ static int mei_dbgfs_devstate_show(struct seq_file *m, void *unused) seq_printf(m, "pg: %s, %s\n", mei_pg_is_enabled(dev) ? "ENABLED" : "DISABLED", mei_pg_state_str(mei_pg_state(dev))); + + seq_printf(m, "pxp: %s\n", mei_dev_pxp_mode_str(dev->pxp_mode)); + return 0; } DEFINE_SHOW_ATTRIBUTE(mei_dbgfs_devstate); diff --git a/drivers/misc/mei/gsc-me.c b/drivers/misc/mei/gsc-me.c index c8145e9b62b6..75765e4df4ed 100644 --- a/drivers/misc/mei/gsc-me.c +++ b/drivers/misc/mei/gsc-me.c @@ -13,6 +13,7 @@ #include <linux/ktime.h> #include <linux/delay.h> #include <linux/pm_runtime.h> +#include <linux/kthread.h> #include "mei_dev.h" #include "hw-me.h" @@ -31,6 +32,17 @@ static int mei_gsc_read_hfs(const struct mei_device *dev, int where, u32 *val) return 0; } +static void mei_gsc_set_ext_op_mem(const struct mei_me_hw *hw, struct resource *mem) +{ + u32 low = lower_32_bits(mem->start); + u32 hi = upper_32_bits(mem->start); + u32 limit = (resource_size(mem) / SZ_4K) | GSC_EXT_OP_MEM_VALID; + + iowrite32(low, hw->mem_addr + H_GSC_EXT_OP_MEM_BASE_ADDR_LO_REG); + iowrite32(hi, hw->mem_addr + H_GSC_EXT_OP_MEM_BASE_ADDR_HI_REG); + iowrite32(limit, hw->mem_addr + H_GSC_EXT_OP_MEM_LIMIT_REG); +} + static int mei_gsc_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *aux_dev_id) { @@ -47,7 +59,7 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev, device = &aux_dev->dev; - dev = mei_me_dev_init(device, cfg); + dev = mei_me_dev_init(device, cfg, adev->slow_firmware); if (!dev) { ret = -ENOMEM; goto err; @@ -66,13 +78,33 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev, dev_set_drvdata(device, dev); - ret = devm_request_threaded_irq(device, hw->irq, - mei_me_irq_quick_handler, - mei_me_irq_thread_handler, - IRQF_ONESHOT, KBUILD_MODNAME, dev); - if (ret) { - dev_err(device, "irq register failed %d\n", ret); - goto err; + if (adev->ext_op_mem.start) { + mei_gsc_set_ext_op_mem(hw, &adev->ext_op_mem); + dev->pxp_mode = MEI_DEV_PXP_INIT; + } + + /* use polling */ + if (mei_me_hw_use_polling(hw)) { + mei_disable_interrupts(dev); + mei_clear_interrupts(dev); + init_waitqueue_head(&hw->wait_active); + hw->is_active = true; /* start in active mode for initialization */ + hw->polling_thread = kthread_run(mei_me_polling_thread, dev, + "kmegscirqd/%s", dev_name(device)); + if (IS_ERR(hw->polling_thread)) { + ret = PTR_ERR(hw->polling_thread); + dev_err(device, "unable to create kernel thread: %d\n", ret); + goto err; + } + } else { + ret = devm_request_threaded_irq(device, hw->irq, + mei_me_irq_quick_handler, + mei_me_irq_thread_handler, + IRQF_ONESHOT, KBUILD_MODNAME, 
dev); + if (ret) { + dev_err(device, "irq register failed %d\n", ret); + goto err; + } } pm_runtime_get_noresume(device); @@ -98,7 +130,8 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev, register_err: mei_stop(dev); - devm_free_irq(device, hw->irq, dev); + if (!mei_me_hw_use_polling(hw)) + devm_free_irq(device, hw->irq, dev); err: dev_err(device, "probe failed: %d\n", ret); @@ -119,12 +152,17 @@ static void mei_gsc_remove(struct auxiliary_device *aux_dev) mei_stop(dev); + hw = to_me_hw(dev); + if (mei_me_hw_use_polling(hw)) + kthread_stop(hw->polling_thread); + mei_deregister(dev); pm_runtime_disable(&aux_dev->dev); mei_disable_interrupts(dev); - devm_free_irq(&aux_dev->dev, hw->irq, dev); + if (!mei_me_hw_use_polling(hw)) + devm_free_irq(&aux_dev->dev, hw->irq, dev); } static int __maybe_unused mei_gsc_pm_suspend(struct device *device) @@ -144,11 +182,22 @@ static int __maybe_unused mei_gsc_pm_suspend(struct device *device) static int __maybe_unused mei_gsc_pm_resume(struct device *device) { struct mei_device *dev = dev_get_drvdata(device); + struct auxiliary_device *aux_dev; + struct mei_aux_device *adev; int err; + struct mei_me_hw *hw; if (!dev) return -ENODEV; + hw = to_me_hw(dev); + aux_dev = to_auxiliary_dev(device); + adev = auxiliary_dev_to_mei_aux_dev(aux_dev); + if (adev->ext_op_mem.start) { + mei_gsc_set_ext_op_mem(hw, &adev->ext_op_mem); + dev->pxp_mode = MEI_DEV_PXP_INIT; + } + err = mei_restart(dev); if (err) return err; @@ -185,6 +234,9 @@ static int __maybe_unused mei_gsc_pm_runtime_suspend(struct device *device) if (mei_write_is_idle(dev)) { hw = to_me_hw(dev); hw->pg_state = MEI_PG_ON; + + if (mei_me_hw_use_polling(hw)) + hw->is_active = false; ret = 0; } else { ret = -EAGAIN; @@ -209,6 +261,11 @@ static int __maybe_unused mei_gsc_pm_runtime_resume(struct device *device) hw = to_me_hw(dev); hw->pg_state = MEI_PG_OFF; + if (mei_me_hw_use_polling(hw)) { + hw->is_active = true; + wake_up(&hw->wait_active); + } + mutex_unlock(&dev->device_lock); irq_ret = mei_me_irq_thread_handler(1, dev); diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index cf2b8261da14..de712cbf5d07 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2003-2020, Intel Corporation. All rights reserved. + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. 
* Intel Management Engine Interface (Intel MEI) Linux driver */ #include <linux/export.h> @@ -232,7 +232,7 @@ int mei_hbm_start_wait(struct mei_device *dev) mutex_unlock(&dev->device_lock); ret = wait_event_timeout(dev->wait_hbm_start, dev->hbm_state != MEI_HBM_STARTING, - mei_secs_to_jiffies(MEI_HBM_TIMEOUT)); + dev->timeouts.hbm); mutex_lock(&dev->device_lock); if (ret == 0 && (dev->hbm_state <= MEI_HBM_STARTING)) { @@ -275,7 +275,7 @@ int mei_hbm_start_req(struct mei_device *dev) } dev->hbm_state = MEI_HBM_STARTING; - dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + dev->init_clients_timer = dev->timeouts.client_init; mei_schedule_stall_timer(dev); return 0; } @@ -316,7 +316,7 @@ static int mei_hbm_dma_setup_req(struct mei_device *dev) } dev->hbm_state = MEI_HBM_DR_SETUP; - dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + dev->init_clients_timer = dev->timeouts.client_init; mei_schedule_stall_timer(dev); return 0; } @@ -351,7 +351,7 @@ static int mei_hbm_capabilities_req(struct mei_device *dev) } dev->hbm_state = MEI_HBM_CAP_SETUP; - dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + dev->init_clients_timer = dev->timeouts.client_init; mei_schedule_stall_timer(dev); return 0; } @@ -385,7 +385,7 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev) return ret; } dev->hbm_state = MEI_HBM_ENUM_CLIENTS; - dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + dev->init_clients_timer = dev->timeouts.client_init; mei_schedule_stall_timer(dev); return 0; } @@ -751,7 +751,7 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx) return ret; } - dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + dev->init_clients_timer = dev->timeouts.client_init; mei_schedule_stall_timer(dev); return 0; diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 15e8e2b322b1..99966cd3e7d8 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (c) 2003-2019, Intel Corporation. All rights reserved. + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. * Intel Management Engine Interface (Intel MEI) Linux driver */ #ifndef _MEI_HW_MEI_REGS_H_ @@ -127,6 +127,8 @@ # define PCI_CFG_HFS_3_FW_SKU_SPS 0x00000060 #define PCI_CFG_HFS_4 0x64 #define PCI_CFG_HFS_5 0x68 +# define GSC_CFG_HFS_5_BOOT_TYPE_MSK 0x00000003 +# define GSC_CFG_HFS_5_BOOT_TYPE_PXP 3 #define PCI_CFG_HFS_6 0x6C /* MEI registers */ @@ -143,6 +145,11 @@ /* H_D0I3C - D0I3 Control */ #define H_D0I3C 0x800 +#define H_GSC_EXT_OP_MEM_BASE_ADDR_LO_REG 0x100 +#define H_GSC_EXT_OP_MEM_BASE_ADDR_HI_REG 0x104 +#define H_GSC_EXT_OP_MEM_LIMIT_REG 0x108 +#define GSC_EXT_OP_MEM_VALID BIT(31) + /* register bits of H_CSR (Host Control Status register) */ /* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */ #define H_CBD 0xFF000000 diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 3a95fe7d4e33..9e2f781c6ed5 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2003-2020, Intel Corporation. All rights reserved. + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. 
* Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -10,6 +10,7 @@ #include <linux/interrupt.h> #include <linux/pm_runtime.h> #include <linux/sizes.h> +#include <linux/delay.h> #include "mei_dev.h" #include "hbm.h" @@ -327,9 +328,12 @@ static void mei_me_intr_clear(struct mei_device *dev) */ static void mei_me_intr_enable(struct mei_device *dev) { - u32 hcsr = mei_hcsr_read(dev); + u32 hcsr; + + if (mei_me_hw_use_polling(to_me_hw(dev))) + return; - hcsr |= H_CSR_IE_MASK; + hcsr = mei_hcsr_read(dev) | H_CSR_IE_MASK; mei_hcsr_set(dev, hcsr); } @@ -354,6 +358,9 @@ static void mei_me_synchronize_irq(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); + if (mei_me_hw_use_polling(hw)) + return; + synchronize_irq(hw->irq); } @@ -380,7 +387,10 @@ static void mei_me_host_set_ready(struct mei_device *dev) { u32 hcsr = mei_hcsr_read(dev); - hcsr |= H_CSR_IE_MASK | H_IG | H_RDY; + if (!mei_me_hw_use_polling(to_me_hw(dev))) + hcsr |= H_CSR_IE_MASK; + + hcsr |= H_IG | H_RDY; mei_hcsr_set(dev, hcsr); } @@ -424,6 +434,29 @@ static bool mei_me_hw_is_resetting(struct mei_device *dev) } /** + * mei_gsc_pxp_check - check for gsc firmware entering pxp mode + * + * @dev: the device structure + */ +static void mei_gsc_pxp_check(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 fwsts5 = 0; + + if (dev->pxp_mode == MEI_DEV_PXP_DEFAULT) + return; + + hw->read_fws(dev, PCI_CFG_HFS_5, &fwsts5); + trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_5", PCI_CFG_HFS_5, fwsts5); + if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) { + dev_dbg(dev->dev, "pxp mode is ready 0x%08x\n", fwsts5); + dev->pxp_mode = MEI_DEV_PXP_READY; + } else { + dev_dbg(dev->dev, "pxp mode is not ready 0x%08x\n", fwsts5); + } +} + +/** * mei_me_hw_ready_wait - wait until the me(hw) has turned ready * or timeout is reached * @@ -435,13 +468,15 @@ static int mei_me_hw_ready_wait(struct mei_device *dev) mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_hw_ready, dev->recvd_hw_ready, - mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT)); + dev->timeouts.hw_ready); mutex_lock(&dev->device_lock); if (!dev->recvd_hw_ready) { dev_err(dev->dev, "wait hw ready failed\n"); return -ETIME; } + mei_gsc_pxp_check(dev); + mei_me_hw_reset_release(dev); dev->recvd_hw_ready = false; return 0; @@ -697,7 +732,6 @@ static void mei_me_pg_unset(struct mei_device *dev) static int mei_me_pg_legacy_enter_sync(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); - unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); int ret; dev->pg_event = MEI_PG_EVENT_WAIT; @@ -708,7 +742,8 @@ static int mei_me_pg_legacy_enter_sync(struct mei_device *dev) mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_pg, - dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); + dev->pg_event == MEI_PG_EVENT_RECEIVED, + dev->timeouts.pgi); mutex_lock(&dev->device_lock); if (dev->pg_event == MEI_PG_EVENT_RECEIVED) { @@ -734,7 +769,6 @@ static int mei_me_pg_legacy_enter_sync(struct mei_device *dev) static int mei_me_pg_legacy_exit_sync(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); - unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); int ret; if (dev->pg_event == MEI_PG_EVENT_RECEIVED) @@ -746,7 +780,8 @@ static int mei_me_pg_legacy_exit_sync(struct mei_device *dev) mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_pg, - dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); + dev->pg_event == MEI_PG_EVENT_RECEIVED, + dev->timeouts.pgi); mutex_lock(&dev->device_lock); 
reply: @@ -762,7 +797,8 @@ reply: mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_pg, - dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout); + dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, + dev->timeouts.pgi); mutex_lock(&dev->device_lock); if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED) @@ -877,8 +913,6 @@ static u32 mei_me_d0i3_unset(struct mei_device *dev) static int mei_me_d0i3_enter_sync(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); - unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT); - unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); int ret; u32 reg; @@ -900,7 +934,8 @@ static int mei_me_d0i3_enter_sync(struct mei_device *dev) mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_pg, - dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout); + dev->pg_event == MEI_PG_EVENT_RECEIVED, + dev->timeouts.pgi); mutex_lock(&dev->device_lock); if (dev->pg_event != MEI_PG_EVENT_RECEIVED) { @@ -920,7 +955,8 @@ static int mei_me_d0i3_enter_sync(struct mei_device *dev) mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_pg, - dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout); + dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, + dev->timeouts.d0i3); mutex_lock(&dev->device_lock); if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) { @@ -980,7 +1016,6 @@ on: static int mei_me_d0i3_exit_sync(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); - unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT); int ret; u32 reg; @@ -1003,7 +1038,8 @@ static int mei_me_d0i3_exit_sync(struct mei_device *dev) mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_pg, - dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout); + dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, + dev->timeouts.d0i3); mutex_lock(&dev->device_lock); if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) { @@ -1176,7 +1212,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) hcsr |= H_RST | H_IG | H_CSR_IS_MASK; - if (!intr_enable) + if (!intr_enable || mei_me_hw_use_polling(to_me_hw(dev))) hcsr &= ~H_CSR_IE_MASK; dev->recvd_hw_ready = false; @@ -1259,7 +1295,8 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) /* check if ME wants a reset */ if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { - dev_warn(dev->dev, "FW not ready: resetting.\n"); + dev_warn(dev->dev, "FW not ready: resetting: dev_state = %d pxp = %d\n", + dev->dev_state, dev->pxp_mode); if (dev->dev_state == MEI_DEV_POWERING_DOWN || dev->dev_state == MEI_DEV_POWER_DOWN) mei_cl_all_disconnect(dev); @@ -1331,6 +1368,66 @@ end: } EXPORT_SYMBOL_GPL(mei_me_irq_thread_handler); +#define MEI_POLLING_TIMEOUT_ACTIVE 100 +#define MEI_POLLING_TIMEOUT_IDLE 500 + +/** + * mei_me_polling_thread - interrupt register polling thread + * + * The thread monitors the interrupt source register and calls + * mei_me_irq_thread_handler() to handle the firmware + * input. + * + * The function polls with a MEI_POLLING_TIMEOUT_ACTIVE timeout + * while events are arriving; when the device is idle, the + * polling interval is increased by MEI_POLLING_TIMEOUT_ACTIVE + * on each pass, up to MEI_POLLING_TIMEOUT_IDLE.
+ * + * @_dev: mei device + * + * Return: always 0 + */ +int mei_me_polling_thread(void *_dev) +{ + struct mei_device *dev = _dev; + irqreturn_t irq_ret; + long polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE; + + dev_dbg(dev->dev, "kernel thread is running\n"); + while (!kthread_should_stop()) { + struct mei_me_hw *hw = to_me_hw(dev); + u32 hcsr; + + wait_event_timeout(hw->wait_active, + hw->is_active || kthread_should_stop(), + msecs_to_jiffies(MEI_POLLING_TIMEOUT_IDLE)); + + if (kthread_should_stop()) + break; + + hcsr = mei_hcsr_read(dev); + if (me_intr_src(hcsr)) { + polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE; + irq_ret = mei_me_irq_thread_handler(1, dev); + if (irq_ret != IRQ_HANDLED) + dev_err(dev->dev, "irq_ret %d\n", irq_ret); + } else { + /* + * Increase timeout by MEI_POLLING_TIMEOUT_ACTIVE + * up to MEI_POLLING_TIMEOUT_IDLE + */ + polling_timeout = clamp_val(polling_timeout + MEI_POLLING_TIMEOUT_ACTIVE, + MEI_POLLING_TIMEOUT_ACTIVE, + MEI_POLLING_TIMEOUT_IDLE); + } + + schedule_timeout_interruptible(msecs_to_jiffies(polling_timeout)); + } + + return 0; +} +EXPORT_SYMBOL_GPL(mei_me_polling_thread); + static const struct mei_hw_ops mei_me_hw_ops = { .trc_status = mei_me_trc_status, @@ -1636,11 +1733,12 @@ EXPORT_SYMBOL_GPL(mei_me_get_cfg); * * @parent: device associated with physical device (pci/platform) * @cfg: per device generation config + * @slow_fw: configure longer timeouts as FW is slow * * Return: The mei_device pointer on success, NULL on failure. */ struct mei_device *mei_me_dev_init(struct device *parent, - const struct mei_cfg *cfg) + const struct mei_cfg *cfg, bool slow_fw) { struct mei_device *dev; struct mei_me_hw *hw; @@ -1655,7 +1753,7 @@ struct mei_device *mei_me_dev_init(struct device *parent, for (i = 0; i < DMA_DSCR_NUM; i++) dev->dr_dscr[i].size = cfg->dma_size[i]; - mei_device_init(dev, parent, &mei_me_hw_ops); + mei_device_init(dev, parent, slow_fw, &mei_me_hw_ops); hw->cfg = cfg; dev->fw_f_fw_ver_supported = cfg->fw_ver_supported; diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h index a071c645e905..95cf830b7c7b 100644 --- a/drivers/misc/mei/hw-me.h +++ b/drivers/misc/mei/hw-me.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2012-2020, Intel Corporation. All rights reserved. + * Copyright (c) 2012-2022, Intel Corporation. All rights reserved. * Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -51,6 +51,9 @@ struct mei_cfg { * @d0i3_supported: di03 support * @hbuf_depth: depth of hardware host/write buffer in slots * @read_fws: read FW status register handler + * @polling_thread: interrupt polling thread + * @wait_active: the polling thread activity wait queue + * @is_active: the device is active */ struct mei_me_hw { const struct mei_cfg *cfg; @@ -60,10 +63,19 @@ struct mei_me_hw { bool d0i3_supported; u8 hbuf_depth; int (*read_fws)(const struct mei_device *dev, int where, u32 *val); + /* polling */ + struct task_struct *polling_thread; + wait_queue_head_t wait_active; + bool is_active; }; #define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw) +static inline bool mei_me_hw_use_polling(const struct mei_me_hw *hw) +{ + return hw->irq < 0; +} + /** * enum mei_cfg_idx - indices to platform specific configurations. 
* @@ -120,12 +132,13 @@ enum mei_cfg_idx { const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx); struct mei_device *mei_me_dev_init(struct device *parent, - const struct mei_cfg *cfg); + const struct mei_cfg *cfg, bool slow_fw); int mei_me_pg_enter_sync(struct mei_device *dev); int mei_me_pg_exit_sync(struct mei_device *dev); irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id); irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id); +int mei_me_polling_thread(void *_dev); #endif /* _MEI_INTERFACE_H_ */ diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c index 00652c137cc7..9862c6cd3e32 100644 --- a/drivers/misc/mei/hw-txe.c +++ b/drivers/misc/mei/hw-txe.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2013-2020, Intel Corporation. All rights reserved. + * Copyright (c) 2013-2022, Intel Corporation. All rights reserved. * Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -1201,7 +1201,7 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev) if (!dev) return NULL; - mei_device_init(dev, &pdev->dev, &mei_txe_hw_ops); + mei_device_init(dev, &pdev->dev, false, &mei_txe_hw_ops); hw = to_txe_hw(dev); diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index b46077b17114..e7e020dba6b1 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2003-2020, Intel Corporation. All rights reserved + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved * Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -16,11 +16,16 @@ #define MEI_CONNECT_TIMEOUT 3 /* HPS: at least 2 seconds */ #define MEI_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */ +#define MEI_CL_CONNECT_TIMEOUT_SLOW 30 /* HPS: Client Connect Timeout, slow FW */ #define MEI_CLIENTS_INIT_TIMEOUT 15 /* HPS: Clients Enumeration Timeout */ #define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */ #define MEI_D0I3_TIMEOUT 5 /* D0i3 set/unset max response time */ #define MEI_HBM_TIMEOUT 1 /* 1 second */ +#define MEI_HBM_TIMEOUT_SLOW 5 /* 5 seconds, slow FW */ + +#define MKHI_RCV_TIMEOUT 500 /* receive timeout in msec */ +#define MKHI_RCV_TIMEOUT_SLOW 10000 /* receive timeout in msec, slow FW */ /* * FW page size for DMA allocations diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index eb052005ca86..bac8852aad51 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2012-2019, Intel Corporation. All rights reserved. + * Copyright (c) 2012-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -218,16 +218,6 @@ int mei_start(struct mei_device *dev) goto err; } - if (!mei_host_is_ready(dev)) { - dev_err(dev->dev, "host is not ready.\n"); - goto err; - } - - if (!mei_hw_is_ready(dev)) { - dev_err(dev->dev, "ME is not ready.\n"); - goto err; - } - if (!mei_hbm_version_is_supported(dev)) { dev_dbg(dev->dev, "MEI start failed.\n"); goto err; @@ -320,6 +310,8 @@ void mei_stop(struct mei_device *dev) mei_clear_interrupts(dev); mei_synchronize_irq(dev); + /* to catch HW-initiated reset */ + mei_cancel_work(dev); mutex_lock(&dev->device_lock); @@ -357,14 +349,16 @@ bool mei_write_is_idle(struct mei_device *dev) EXPORT_SYMBOL_GPL(mei_write_is_idle); /** - * mei_device_init -- initialize mei_device structure + * mei_device_init - initialize mei_device structure * * @dev: the mei device * @device: the device structure + * @slow_fw: configure longer timeouts as FW is slow * @hw_ops: hw operations */ void mei_device_init(struct mei_device *dev, struct device *device, + bool slow_fw, const struct mei_hw_ops *hw_ops) { /* setup our list array */ @@ -393,6 +387,8 @@ void mei_device_init(struct mei_device *dev, bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); dev->open_handle_count = 0; + dev->pxp_mode = MEI_DEV_PXP_DEFAULT; + /* * Reserving the first client ID * 0: Reserved for MEI Bus Message communications @@ -402,6 +398,21 @@ void mei_device_init(struct mei_device *dev, dev->pg_event = MEI_PG_EVENT_IDLE; dev->ops = hw_ops; dev->dev = device; + + dev->timeouts.hw_ready = mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT); + dev->timeouts.connect = MEI_CONNECT_TIMEOUT; + dev->timeouts.client_init = MEI_CLIENTS_INIT_TIMEOUT; + dev->timeouts.pgi = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); + dev->timeouts.d0i3 = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT); + if (slow_fw) { + dev->timeouts.cl_connect = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT_SLOW); + dev->timeouts.hbm = mei_secs_to_jiffies(MEI_HBM_TIMEOUT_SLOW); + dev->timeouts.mkhi_recv = msecs_to_jiffies(MKHI_RCV_TIMEOUT_SLOW); + } else { + dev->timeouts.cl_connect = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT); + dev->timeouts.hbm = mei_secs_to_jiffies(MEI_HBM_TIMEOUT); + dev->timeouts.mkhi_recv = msecs_to_jiffies(MKHI_RCV_TIMEOUT); + } } EXPORT_SYMBOL_GPL(mei_device_init); diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 786f7c8f7f61..930887e7e38d 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2003-2020, Intel Corporation. All rights reserved. + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. * Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -571,7 +571,7 @@ static int mei_ioctl_connect_vtag(struct file *file, cl->state == MEI_FILE_DISCONNECTED || cl->state == MEI_FILE_DISCONNECT_REQUIRED || cl->state == MEI_FILE_DISCONNECT_REPLY), - mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + dev->timeouts.cl_connect); mutex_lock(&dev->device_lock); } diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 694f866f87ef..6bb3e1ba9ded 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2003-2019, Intel Corporation. All rights reserved. + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. 
* Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -62,6 +62,21 @@ enum mei_dev_state { MEI_DEV_POWER_UP }; +/** + * enum mei_dev_pxp_mode - MEI PXP mode state + * + * @MEI_DEV_PXP_DEFAULT: PCH based device, no initialization required + * @MEI_DEV_PXP_INIT: device requires initialization, send setup message to firmware + * @MEI_DEV_PXP_SETUP: device is in setup stage, waiting for firmware response + * @MEI_DEV_PXP_READY: device initialized + */ +enum mei_dev_pxp_mode { + MEI_DEV_PXP_DEFAULT = 0, + MEI_DEV_PXP_INIT = 1, + MEI_DEV_PXP_SETUP = 2, + MEI_DEV_PXP_READY = 3, +}; + const char *mei_dev_state_str(int state); enum mei_file_transaction_states { @@ -415,6 +430,17 @@ struct mei_fw_version { #define MEI_MAX_FW_VER_BLOCKS 3 +struct mei_dev_timeouts { + unsigned long hw_ready; /* Timeout on ready message, in jiffies */ + int connect; /* HPS: at least 2 seconds, in seconds */ + unsigned long cl_connect; /* HPS: Client Connect Timeout, in jiffies */ + int client_init; /* HPS: Clients Enumeration Timeout, in seconds */ + unsigned long pgi; /* PG Isolation time response, in jiffies */ + unsigned int d0i3; /* D0i3 set/unset max response time, in jiffies */ + unsigned long hbm; /* HBM operation timeout, in jiffies */ + unsigned long mkhi_recv; /* receive timeout, in jiffies */ +}; + /** * struct mei_device - MEI private device struct * @@ -443,6 +469,7 @@ struct mei_fw_version { * @reset_count : number of consecutive resets * @dev_state : device state * @hbm_state : state of host bus message protocol + * @pxp_mode : PXP device mode * @init_clients_timer : HBM init handshake timeout * * @pg_event : power gating event @@ -480,6 +507,8 @@ struct mei_fw_version { * @allow_fixed_address: allow user space to connect a fixed client * @override_fixed_address: force allow fixed address behavior * + * @timeouts: actual timeout values + * * @reset_work : work item for the device reset * @bus_rescan_work : work item for the bus rescan * @@ -524,6 +553,7 @@ struct mei_device { unsigned long reset_count; enum mei_dev_state dev_state; enum mei_hbm_state hbm_state; + enum mei_dev_pxp_mode pxp_mode; u16 init_clients_timer; /* @@ -568,6 +598,8 @@ struct mei_device { bool allow_fixed_address; bool override_fixed_address; + struct mei_dev_timeouts timeouts; + struct work_struct reset_work; struct work_struct bus_rescan_work; @@ -632,6 +664,7 @@ static inline u32 mei_slots2data(int slots) */ void mei_device_init(struct mei_device *dev, struct device *device, + bool slow_fw, const struct mei_hw_ops *hw_ops); int mei_reset(struct mei_device *dev); int mei_start(struct mei_device *dev); diff --git a/drivers/misc/mei/mkhi.h b/drivers/misc/mei/mkhi.h new file mode 100644 index 000000000000..1473ea489666 --- /dev/null +++ b/drivers/misc/mei/mkhi.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
+ * Intel Management Engine Interface (Intel MEI) Linux driver + */ + +#ifndef _MEI_MKHI_H_ +#define _MEI_MKHI_H_ + +#include <linux/types.h> + +#define MKHI_FEATURE_PTT 0x10 + +#define MKHI_FWCAPS_GROUP_ID 0x3 +#define MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD 6 +#define MKHI_GEN_GROUP_ID 0xFF +#define MKHI_GEN_GET_FW_VERSION_CMD 0x2 + +#define MKHI_GROUP_ID_GFX 0x30 +#define MKHI_GFX_RESET_WARN_CMD_REQ 0x0 +#define MKHI_GFX_MEMORY_READY_CMD_REQ 0x1 + +/* Allow transition to PXP mode without approval */ +#define MKHI_GFX_MEM_READY_PXP_ALLOWED 0x1 + +struct mkhi_rule_id { + __le16 rule_type; + u8 feature_id; + u8 reserved; +} __packed; + +struct mkhi_fwcaps { + struct mkhi_rule_id id; + u8 len; + u8 data[]; +} __packed; + +struct mkhi_msg_hdr { + u8 group_id; + u8 command; + u8 reserved; + u8 result; +} __packed; + +struct mkhi_msg { + struct mkhi_msg_hdr hdr; + u8 data[]; +} __packed; + +struct mkhi_gfx_mem_ready { + struct mkhi_msg_hdr hdr; + u32 flags; +} __packed; + +#endif /* _MEI_MKHI_H_ */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 5435604327a7..704cd0caa172 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2003-2020, Intel Corporation. All rights reserved. + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. * Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -203,7 +203,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } /* allocates and initializes the mei dev structure */ - dev = mei_me_dev_init(&pdev->dev, cfg); + dev = mei_me_dev_init(&pdev->dev, cfg, false); if (!dev) { err = -ENOMEM; goto end; } diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 278031aa2e84..4a4c190f7698 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -641,6 +641,7 @@ INTEL_VGA_DEVICE(0x4682, info), \ INTEL_VGA_DEVICE(0x4688, info), \ INTEL_VGA_DEVICE(0x468A, info), \ + INTEL_VGA_DEVICE(0x468B, info), \ INTEL_VGA_DEVICE(0x4690, info), \ INTEL_VGA_DEVICE(0x4692, info), \ INTEL_VGA_DEVICE(0x4693, info) diff --git a/include/linux/mei_aux.h b/include/linux/mei_aux.h index 587f25128848..506912ad363b 100644 --- a/include/linux/mei_aux.h +++ b/include/linux/mei_aux.h @@ -7,10 +7,22 @@ #include <linux/auxiliary_bus.h> +/** + * struct mei_aux_device - mei auxiliary device + * @aux_dev: auxiliary device object + * @irq: interrupt driving the mei auxiliary device + * @bar: mmio resource bar reserved to mei auxiliary device + * @ext_op_mem: resource for extended operational memory + * used in graphics PXP mode. + * @slow_firmware: The device has slow underlying firmware. + * Such firmware requires larger operation timeouts. + */ struct mei_aux_device { struct auxiliary_device aux_dev; int irq; struct resource bar; + struct resource ext_op_mem; + bool slow_firmware; }; #define auxiliary_dev_to_mei_aux_dev(auxiliary_dev) \
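Note on the timeout conversion: the hbm.c, hw-me.c and main.c hunks above all follow the same pattern: a constant wrapped in mei_secs_to_jiffies() at every call site becomes a per-device value chosen once in mei_device_init() and read from dev->timeouts. A minimal sketch of the resulting consumer pattern; example_wait_hbm_started() is a hypothetical waiter written for illustration, the real call sites are the converted wait_event_timeout() users above.

#include "mei_dev.h"	/* struct mei_device, struct mei_dev_timeouts */

static long example_wait_hbm_started(struct mei_device *dev)
{
	long ret;

	mutex_unlock(&dev->device_lock);
	/* already in jiffies; 1s for normal FW, 5s when slow_fw was set */
	ret = wait_event_timeout(dev->wait_hbm_start,
				 dev->hbm_state != MEI_HBM_STARTING,
				 dev->timeouts.hbm);
	mutex_lock(&dev->device_lock);

	return ret ? 0 : -ETIMEDOUT;
}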
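Note on polling-mode selection: mei_me_hw_use_polling() keys the whole polling path off hw->irq being negative, and mei_gsc_probe() copies that value straight from the auxiliary device, so the parent driver opts in purely through the fields it fills in struct mei_aux_device. A sketch under stated assumptions: the function, its resource parameters, and the "mei-gscfi" name are hypothetical, not taken from these hunks.

#include <linux/auxiliary_bus.h>
#include <linux/mei_aux.h>
#include <linux/pci.h>
#include <linux/slab.h>

static void example_adev_release(struct device *dev)
{
	struct mei_aux_device *adev =
		auxiliary_dev_to_mei_aux_dev(to_auxiliary_dev(dev));

	kfree(adev);
}

static int example_add_gsc_aux_dev(struct pci_dev *parent,
				   const struct resource *bar,
				   const struct resource *ext_op_mem)
{
	struct mei_aux_device *adev;
	int ret;

	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	adev->irq = -1;			/* no interrupt: mei_me_hw_use_polling() is true */
	adev->bar = *bar;
	adev->ext_op_mem = *ext_op_mem;	/* non-zero start puts probe into MEI_DEV_PXP_INIT */
	adev->slow_firmware = true;	/* mei_me_dev_init() picks the *_SLOW timeouts */

	adev->aux_dev.name = "mei-gscfi";	/* hypothetical name */
	adev->aux_dev.dev.parent = &parent->dev;
	adev->aux_dev.dev.release = example_adev_release;

	ret = auxiliary_device_init(&adev->aux_dev);
	if (ret) {
		kfree(adev);
		return ret;
	}

	ret = auxiliary_device_add(&adev->aux_dev);
	if (ret)
		auxiliary_device_uninit(&adev->aux_dev);

	return ret;
}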
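Note on the extended operational memory registers: mei_gsc_set_ext_op_mem() splits the region base across two 32-bit registers and encodes the size in 4 KiB pages, with bit 31 (GSC_EXT_OP_MEM_VALID) marking the range as programmed. A worked example of the register values, assuming a hypothetical 16 MiB carve-out at 0x1_8000_0000:

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/sizes.h>

#include "hw-me-regs.h"	/* GSC_EXT_OP_MEM_VALID, added above */

static void example_ext_op_mem_regs(void)
{
	struct resource mem = DEFINE_RES_MEM(0x180000000ULL, SZ_16M);
	u32 low = lower_32_bits(mem.start);	/* 0x80000000 */
	u32 hi = upper_32_bits(mem.start);	/* 0x00000001 */
	/* 16 MiB / 4 KiB = 0x1000 pages, ORed with BIT(31) */
	u32 limit = (resource_size(&mem) / SZ_4K) | GSC_EXT_OP_MEM_VALID;

	pr_debug("lo=%#x hi=%#x limit=%#x\n", low, hi, limit);
	/* limit == 0x80001000 */
}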
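Note on the MKHI message layout: mkhi.h above defines the graphics-group message structures, but the sender is not among these hunks. The sketch below composes the memory-ready request from those structures; the transport choice (__mei_cl_send() over the MEI client bus with MEI_CL_IO_TX_INTERNAL, fire-and-forget) is an assumption for illustration, not something this diff shows.

#include <linux/mei_cl_bus.h>

#include "mei_dev.h"	/* assumed: __mei_cl_send(), MEI_CL_IO_TX_INTERNAL */
#include "mkhi.h"

static int example_gfx_memory_ready(struct mei_cl_device *cldev)
{
	struct mkhi_gfx_mem_ready req = {};
	ssize_t ret;

	req.hdr.group_id = MKHI_GROUP_ID_GFX;
	req.hdr.command = MKHI_GFX_MEMORY_READY_CMD_REQ;
	/* allow the FW to enter PXP mode without further approval */
	req.flags = MKHI_GFX_MEM_READY_PXP_ALLOWED;

	ret = __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), 0,
			    MEI_CL_IO_TX_INTERNAL);
	return ret < 0 ? (int)ret : 0;
}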